intel_dp.c revision 50f3b016b055dbc83094bc2d7a91c3c69edbc88b
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* NOTE(review): units not visible in this chunk — presumably msecs; confirm at use site */
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	return intel_dp->is_pch_edp;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}

/* Resolve the drm_device that owns this DP port, via its encoder. */
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

/* Connector -> intel_dp: look up the encoder currently attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

/**
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 * @encoder: DRM encoder
 *
 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
 * by intel_display.c.
97 */ 98bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) 99{ 100 struct intel_dp *intel_dp; 101 102 if (!encoder) 103 return false; 104 105 intel_dp = enc_to_intel_dp(encoder); 106 107 return is_pch_edp(intel_dp); 108} 109 110static void intel_dp_link_down(struct intel_dp *intel_dp); 111 112void 113intel_edp_link_config(struct intel_encoder *intel_encoder, 114 int *lane_num, int *link_bw) 115{ 116 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 117 118 *lane_num = intel_dp->lane_count; 119 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 120} 121 122int 123intel_edp_target_clock(struct intel_encoder *intel_encoder, 124 struct drm_display_mode *mode) 125{ 126 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 127 struct intel_connector *intel_connector = intel_dp->attached_connector; 128 129 if (intel_connector->panel.fixed_mode) 130 return intel_connector->panel.fixed_mode->clock; 131 else 132 return mode->clock; 133} 134 135static int 136intel_dp_max_link_bw(struct intel_dp *intel_dp) 137{ 138 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 139 140 switch (max_link_bw) { 141 case DP_LINK_BW_1_62: 142 case DP_LINK_BW_2_7: 143 break; 144 default: 145 max_link_bw = DP_LINK_BW_1_62; 146 break; 147 } 148 return max_link_bw; 149} 150 151/* 152 * The units on the numbers in the next two are... bizarre. Examples will 153 * make it clearer; this one parallels an example in the eDP spec. 154 * 155 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 156 * 157 * 270000 * 1 * 8 / 10 == 216000 158 * 159 * The actual data capacity of that configuration is 2.16Gbit/s, so the 160 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 161 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 162 * 119000. At 18bpp that's 2142000 kilobits per second. 
163 * 164 * Thus the strange-looking division by 10 in intel_dp_link_required, to 165 * get the result in decakilobits instead of kilobits. 166 */ 167 168static int 169intel_dp_link_required(int pixel_clock, int bpp) 170{ 171 return (pixel_clock * bpp + 9) / 10; 172} 173 174static int 175intel_dp_max_data_rate(int max_link_clock, int max_lanes) 176{ 177 return (max_link_clock * max_lanes * 8) / 10; 178} 179 180static bool 181intel_dp_adjust_dithering(struct intel_dp *intel_dp, 182 struct drm_display_mode *mode, 183 bool adjust_mode) 184{ 185 int max_link_clock = 186 drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 187 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 188 int max_rate, mode_rate; 189 190 mode_rate = intel_dp_link_required(mode->clock, 24); 191 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 192 193 if (mode_rate > max_rate) { 194 mode_rate = intel_dp_link_required(mode->clock, 18); 195 if (mode_rate > max_rate) 196 return false; 197 198 if (adjust_mode) 199 mode->private_flags 200 |= INTEL_MODE_DP_FORCE_6BPC; 201 202 return true; 203 } 204 205 return true; 206} 207 208static int 209intel_dp_mode_valid(struct drm_connector *connector, 210 struct drm_display_mode *mode) 211{ 212 struct intel_dp *intel_dp = intel_attached_dp(connector); 213 struct intel_connector *intel_connector = to_intel_connector(connector); 214 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 215 216 if (is_edp(intel_dp) && fixed_mode) { 217 if (mode->hdisplay > fixed_mode->hdisplay) 218 return MODE_PANEL; 219 220 if (mode->vdisplay > fixed_mode->vdisplay) 221 return MODE_PANEL; 222 } 223 224 if (!intel_dp_adjust_dithering(intel_dp, mode, false)) 225 return MODE_CLOCK_HIGH; 226 227 if (mode->clock < 10000) 228 return MODE_CLOCK_LOW; 229 230 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 231 return MODE_H_ILLEGAL; 232 233 return MODE_OK; 234} 235 236static uint32_t 237pack_aux(uint8_t *src, int src_bytes) 238{ 239 int i; 240 uint32_t 
v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

/* Unpack up to 4 bytes, MSB first, from a 32-bit AUX data register word. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

/* True when the PCH power sequencer reports panel power is on. */
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}

/* True when VDD force-enable is currently set in the panel power control. */
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}

/*
 * Sanity check before AUX traffic to an eDP panel: warn if neither panel
 * power nor forced VDD is up, since the transaction can't work then.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) &&
	    !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

/*
 * Wait for SEND_BUSY to clear in the AUX channel control register, either
 * by sleeping on the gmbus wait queue (irq-driven) or by atomic polling.
 * Returns the final value read from the control register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

/*
 * Perform one raw AUX channel transaction: load @send into the data
 * registers, fire the transfer, and unpack the reply into @recv.
 * Returns the number of bytes received, or a negative errno.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the control reg */
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz.  So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (HAS_DDI(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ?
DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Drop the latency request taken at entry */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}

/* Write data to the aux channel in native mode.
 * Retries forever on DEFER; returns @send_bytes on ACK, -EIO on NACK.
 * NOTE(review): the oversize-write path returns a bare -1 rather than a
 * -Exxx code like the rest of this file (-EIO/-EPROTO); callers only check
 * for < 0, but consider -E2BIG for consistency.
 */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];	/* 4-byte header + at most 16 payload bytes */
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;	/* AUX length field is length - 1 */
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];	/* 1 reply byte + up to 19 data bytes */
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			/* Strip the reply byte; the rest is payload */
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

/*
 * i2c_algo_dp_aux callback: tunnel one I2C byte transfer (or address-only
 * start/stop) over the AUX channel, per the I2C-over-AUX protocol.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* Middle-of-transaction bit stays set until the I2C STOP */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* address-only transaction (start/stop) */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		/* Native ACK received; now decode the I2C-over-AUX reply */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

/*
 * Register the I2C-over-AUX adapter for this port.  VDD is forced on
 * around bus registration because adding the bus probes the channel.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

/*
 * Pick a link configuration (bw code + lane count) for the requested mode.
 * Returns false if no supported configuration can carry the mode, true
 * otherwise with intel_dp->link_bw/lane_count and the adjusted mode set.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct drm_display_mode *mode = &pipe_config->requested_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	/* Non-DDI PCH platforms route non-eDP DP through the PCH encoder */
	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
		pipe_config->has_pch_encoder = true;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ?
18 : 24;

	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 *   CEA-861-E - 5.1 Default Encoding Parameters
		 *   VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Walk clocks then lane counts; take the first config that fits */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_bw_clock =
				drm_dp_bw_code_to_link_rate(bws[clock]);
			int link_avail = intel_dp_max_data_rate(link_bw_clock,
								lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = link_bw_clock;
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	/* No bw/lane combination could carry the mode */
	return false;
}

/* Program the data/link M/N dividers for the pipe feeding a DP output. */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_link_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_link_compute_m_n(intel_crtc->bpp, lane_count,
			       mode->clock, adjusted_mode->clock, &m_n);

	/* Register layout differs per platform generation */
	if (HAS_DDI(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

/* Build the DPCD link configuration block from the chosen bw/lane count. */
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |=
DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

/* Select the CPU eDP PLL frequency (160 vs 270 MHz) on ILK-class hardware. */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

/*
 * Build the port control register value (intel_dp->DP) for the mode being
 * set: lane count, sync polarity, audio, enhanced framing, pipe select and
 * (for CPU eDP) PLL frequency, per the register layout of each platform.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		if (!HAS_PCH_SPLIT(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out
required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}

/* Panel power sequencer state masks/values used by the wait helpers below */
#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

/* Poll PCH_PP_STATUS (up to 5s) until the masked bits match @value. */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value and substitute the unlock key, so a
 * subsequent write of the returned value leaves the register unlocked.
 * Note this helper does not write the register itself.
 */

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/* Force panel VDD on so AUX/DDC work before the panel is fully powered. */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/* Drop forced VDD now (caller holds mode_config.mutex) if nobody wants it. */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp =
ironlake_get_pp_control(dev_priv); 1087 pp &= ~EDP_FORCE_VDD; 1088 I915_WRITE(PCH_PP_CONTROL, pp); 1089 POSTING_READ(PCH_PP_CONTROL); 1090 1091 /* Make sure sequencer is idle before allowing subsequent activity */ 1092 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1093 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1094 1095 msleep(intel_dp->panel_power_down_delay); 1096 } 1097} 1098 1099static void ironlake_panel_vdd_work(struct work_struct *__work) 1100{ 1101 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1102 struct intel_dp, panel_vdd_work); 1103 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1104 1105 mutex_lock(&dev->mode_config.mutex); 1106 ironlake_panel_vdd_off_sync(intel_dp); 1107 mutex_unlock(&dev->mode_config.mutex); 1108} 1109 1110void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1111{ 1112 if (!is_edp(intel_dp)) 1113 return; 1114 1115 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1116 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1117 1118 intel_dp->want_panel_vdd = false; 1119 1120 if (sync) { 1121 ironlake_panel_vdd_off_sync(intel_dp); 1122 } else { 1123 /* 1124 * Queue the timer to fire a long 1125 * time from now (relative to the power down delay) 1126 * to keep the panel power up across a sequence of operations 1127 */ 1128 schedule_delayed_work(&intel_dp->panel_vdd_work, 1129 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); 1130 } 1131} 1132 1133void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1134{ 1135 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1136 struct drm_i915_private *dev_priv = dev->dev_private; 1137 u32 pp; 1138 1139 if (!is_edp(intel_dp)) 1140 return; 1141 1142 DRM_DEBUG_KMS("Turn eDP power on\n"); 1143 1144 if (ironlake_edp_have_panel_power(intel_dp)) { 1145 DRM_DEBUG_KMS("eDP power already on\n"); 1146 return; 1147 } 1148 1149 ironlake_wait_panel_power_cycle(intel_dp); 1150 1151 pp = 
ironlake_get_pp_control(dev_priv); 1152 if (IS_GEN5(dev)) { 1153 /* ILK workaround: disable reset around power sequence */ 1154 pp &= ~PANEL_POWER_RESET; 1155 I915_WRITE(PCH_PP_CONTROL, pp); 1156 POSTING_READ(PCH_PP_CONTROL); 1157 } 1158 1159 pp |= POWER_TARGET_ON; 1160 if (!IS_GEN5(dev)) 1161 pp |= PANEL_POWER_RESET; 1162 1163 I915_WRITE(PCH_PP_CONTROL, pp); 1164 POSTING_READ(PCH_PP_CONTROL); 1165 1166 ironlake_wait_panel_on(intel_dp); 1167 1168 if (IS_GEN5(dev)) { 1169 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1170 I915_WRITE(PCH_PP_CONTROL, pp); 1171 POSTING_READ(PCH_PP_CONTROL); 1172 } 1173} 1174 1175void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1176{ 1177 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1178 struct drm_i915_private *dev_priv = dev->dev_private; 1179 u32 pp; 1180 1181 if (!is_edp(intel_dp)) 1182 return; 1183 1184 DRM_DEBUG_KMS("Turn eDP power off\n"); 1185 1186 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1187 1188 pp = ironlake_get_pp_control(dev_priv); 1189 /* We need to switch off panel power _and_ force vdd, for otherwise some 1190 * panels get very unhappy and cease to work. 
*/ 1191 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1192 I915_WRITE(PCH_PP_CONTROL, pp); 1193 POSTING_READ(PCH_PP_CONTROL); 1194 1195 intel_dp->want_panel_vdd = false; 1196 1197 ironlake_wait_panel_off(intel_dp); 1198} 1199 1200void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1201{ 1202 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1203 struct drm_device *dev = intel_dig_port->base.base.dev; 1204 struct drm_i915_private *dev_priv = dev->dev_private; 1205 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; 1206 u32 pp; 1207 1208 if (!is_edp(intel_dp)) 1209 return; 1210 1211 DRM_DEBUG_KMS("\n"); 1212 /* 1213 * If we enable the backlight right away following a panel power 1214 * on, we may see slight flicker as the panel syncs with the eDP 1215 * link. So delay a bit to make sure the image is solid before 1216 * allowing it to appear. 1217 */ 1218 msleep(intel_dp->backlight_on_delay); 1219 pp = ironlake_get_pp_control(dev_priv); 1220 pp |= EDP_BLC_ENABLE; 1221 I915_WRITE(PCH_PP_CONTROL, pp); 1222 POSTING_READ(PCH_PP_CONTROL); 1223 1224 intel_panel_enable_backlight(dev, pipe); 1225} 1226 1227void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1228{ 1229 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1230 struct drm_i915_private *dev_priv = dev->dev_private; 1231 u32 pp; 1232 1233 if (!is_edp(intel_dp)) 1234 return; 1235 1236 intel_panel_disable_backlight(dev); 1237 1238 DRM_DEBUG_KMS("\n"); 1239 pp = ironlake_get_pp_control(dev_priv); 1240 pp &= ~EDP_BLC_ENABLE; 1241 I915_WRITE(PCH_PP_CONTROL, pp); 1242 POSTING_READ(PCH_PP_CONTROL); 1243 msleep(intel_dp->backlight_off_delay); 1244} 1245 1246static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1247{ 1248 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1249 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 1250 struct drm_device *dev = crtc->dev; 1251 struct drm_i915_private *dev_priv = 
dev->dev_private; 1252 u32 dpa_ctl; 1253 1254 assert_pipe_disabled(dev_priv, 1255 to_intel_crtc(crtc)->pipe); 1256 1257 DRM_DEBUG_KMS("\n"); 1258 dpa_ctl = I915_READ(DP_A); 1259 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); 1260 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 1261 1262 /* We don't adjust intel_dp->DP while tearing down the link, to 1263 * facilitate link retraining (e.g. after hotplug). Hence clear all 1264 * enable bits here to ensure that we don't enable too much. */ 1265 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 1266 intel_dp->DP |= DP_PLL_ENABLE; 1267 I915_WRITE(DP_A, intel_dp->DP); 1268 POSTING_READ(DP_A); 1269 udelay(200); 1270} 1271 1272static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 1273{ 1274 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1275 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 1276 struct drm_device *dev = crtc->dev; 1277 struct drm_i915_private *dev_priv = dev->dev_private; 1278 u32 dpa_ctl; 1279 1280 assert_pipe_disabled(dev_priv, 1281 to_intel_crtc(crtc)->pipe); 1282 1283 dpa_ctl = I915_READ(DP_A); 1284 WARN((dpa_ctl & DP_PLL_ENABLE) == 0, 1285 "dp pll off, should be on\n"); 1286 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 1287 1288 /* We can't rely on the value tracked for the DP register in 1289 * intel_dp->DP because link_down must not change that (otherwise link 1290 * re-training will fail. 
*/ 1291 dpa_ctl &= ~DP_PLL_ENABLE; 1292 I915_WRITE(DP_A, dpa_ctl); 1293 POSTING_READ(DP_A); 1294 udelay(200); 1295} 1296 1297/* If the sink supports it, try to set the power state appropriately */ 1298void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 1299{ 1300 int ret, i; 1301 1302 /* Should have a valid DPCD by this point */ 1303 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1304 return; 1305 1306 if (mode != DRM_MODE_DPMS_ON) { 1307 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, 1308 DP_SET_POWER_D3); 1309 if (ret != 1) 1310 DRM_DEBUG_DRIVER("failed to write sink power state\n"); 1311 } else { 1312 /* 1313 * When turning on, we need to retry for 1ms to give the sink 1314 * time to wake up. 1315 */ 1316 for (i = 0; i < 3; i++) { 1317 ret = intel_dp_aux_native_write_1(intel_dp, 1318 DP_SET_POWER, 1319 DP_SET_POWER_D0); 1320 if (ret == 1) 1321 break; 1322 msleep(1); 1323 } 1324 } 1325} 1326 1327static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 1328 enum pipe *pipe) 1329{ 1330 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1331 struct drm_device *dev = encoder->base.dev; 1332 struct drm_i915_private *dev_priv = dev->dev_private; 1333 u32 tmp = I915_READ(intel_dp->output_reg); 1334 1335 if (!(tmp & DP_PORT_EN)) 1336 return false; 1337 1338 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 1339 *pipe = PORT_TO_PIPE_CPT(tmp); 1340 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 1341 *pipe = PORT_TO_PIPE(tmp); 1342 } else { 1343 u32 trans_sel; 1344 u32 trans_dp; 1345 int i; 1346 1347 switch (intel_dp->output_reg) { 1348 case PCH_DP_B: 1349 trans_sel = TRANS_DP_PORT_SEL_B; 1350 break; 1351 case PCH_DP_C: 1352 trans_sel = TRANS_DP_PORT_SEL_C; 1353 break; 1354 case PCH_DP_D: 1355 trans_sel = TRANS_DP_PORT_SEL_D; 1356 break; 1357 default: 1358 return true; 1359 } 1360 1361 for_each_pipe(i) { 1362 trans_dp = I915_READ(TRANS_DP_CTL(i)); 1363 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { 1364 *pipe 
= i; 1365 return true; 1366 } 1367 } 1368 1369 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", 1370 intel_dp->output_reg); 1371 } 1372 1373 return true; 1374} 1375 1376static void intel_disable_dp(struct intel_encoder *encoder) 1377{ 1378 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1379 1380 /* Make sure the panel is off before trying to change the mode. But also 1381 * ensure that we have vdd while we switch off the panel. */ 1382 ironlake_edp_panel_vdd_on(intel_dp); 1383 ironlake_edp_backlight_off(intel_dp); 1384 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1385 ironlake_edp_panel_off(intel_dp); 1386 1387 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1388 if (!is_cpu_edp(intel_dp)) 1389 intel_dp_link_down(intel_dp); 1390} 1391 1392static void intel_post_disable_dp(struct intel_encoder *encoder) 1393{ 1394 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1395 1396 if (is_cpu_edp(intel_dp)) { 1397 intel_dp_link_down(intel_dp); 1398 ironlake_edp_pll_off(intel_dp); 1399 } 1400} 1401 1402static void intel_enable_dp(struct intel_encoder *encoder) 1403{ 1404 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1405 struct drm_device *dev = encoder->base.dev; 1406 struct drm_i915_private *dev_priv = dev->dev_private; 1407 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1408 1409 if (WARN_ON(dp_reg & DP_PORT_EN)) 1410 return; 1411 1412 ironlake_edp_panel_vdd_on(intel_dp); 1413 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1414 intel_dp_start_link_train(intel_dp); 1415 ironlake_edp_panel_on(intel_dp); 1416 ironlake_edp_panel_vdd_off(intel_dp, true); 1417 intel_dp_complete_link_train(intel_dp); 1418 ironlake_edp_backlight_on(intel_dp); 1419} 1420 1421static void intel_pre_enable_dp(struct intel_encoder *encoder) 1422{ 1423 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1424 1425 if (is_cpu_edp(intel_dp)) 1426 ironlake_edp_pll_on(intel_dp); 1427} 1428 1429/* 1430 * Native read with retry 
for link status and receiver capability reads for 1431 * cases where the sink may still be asleep. 1432 */ 1433static bool 1434intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1435 uint8_t *recv, int recv_bytes) 1436{ 1437 int ret, i; 1438 1439 /* 1440 * Sinks are *supposed* to come up within 1ms from an off state, 1441 * but we're also supposed to retry 3 times per the spec. 1442 */ 1443 for (i = 0; i < 3; i++) { 1444 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1445 recv_bytes); 1446 if (ret == recv_bytes) 1447 return true; 1448 msleep(1); 1449 } 1450 1451 return false; 1452} 1453 1454/* 1455 * Fetch AUX CH registers 0x202 - 0x207 which contain 1456 * link status information 1457 */ 1458static bool 1459intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1460{ 1461 return intel_dp_aux_native_read_retry(intel_dp, 1462 DP_LANE0_1_STATUS, 1463 link_status, 1464 DP_LINK_STATUS_SIZE); 1465} 1466 1467#if 0 1468static char *voltage_names[] = { 1469 "0.4V", "0.6V", "0.8V", "1.2V" 1470}; 1471static char *pre_emph_names[] = { 1472 "0dB", "3.5dB", "6dB", "9.5dB" 1473}; 1474static char *link_train_names[] = { 1475 "pattern 1", "pattern 2", "idle", "off" 1476}; 1477#endif 1478 1479/* 1480 * These are source-specific values; current Intel hardware supports 1481 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1482 */ 1483 1484static uint8_t 1485intel_dp_voltage_max(struct intel_dp *intel_dp) 1486{ 1487 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1488 1489 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1490 return DP_TRAIN_VOLTAGE_SWING_800; 1491 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1492 return DP_TRAIN_VOLTAGE_SWING_1200; 1493 else 1494 return DP_TRAIN_VOLTAGE_SWING_800; 1495} 1496 1497static uint8_t 1498intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1499{ 1500 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1501 1502 if (HAS_DDI(dev)) 
{ 1503 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1504 case DP_TRAIN_VOLTAGE_SWING_400: 1505 return DP_TRAIN_PRE_EMPHASIS_9_5; 1506 case DP_TRAIN_VOLTAGE_SWING_600: 1507 return DP_TRAIN_PRE_EMPHASIS_6; 1508 case DP_TRAIN_VOLTAGE_SWING_800: 1509 return DP_TRAIN_PRE_EMPHASIS_3_5; 1510 case DP_TRAIN_VOLTAGE_SWING_1200: 1511 default: 1512 return DP_TRAIN_PRE_EMPHASIS_0; 1513 } 1514 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1515 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1516 case DP_TRAIN_VOLTAGE_SWING_400: 1517 return DP_TRAIN_PRE_EMPHASIS_6; 1518 case DP_TRAIN_VOLTAGE_SWING_600: 1519 case DP_TRAIN_VOLTAGE_SWING_800: 1520 return DP_TRAIN_PRE_EMPHASIS_3_5; 1521 default: 1522 return DP_TRAIN_PRE_EMPHASIS_0; 1523 } 1524 } else { 1525 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1526 case DP_TRAIN_VOLTAGE_SWING_400: 1527 return DP_TRAIN_PRE_EMPHASIS_6; 1528 case DP_TRAIN_VOLTAGE_SWING_600: 1529 return DP_TRAIN_PRE_EMPHASIS_6; 1530 case DP_TRAIN_VOLTAGE_SWING_800: 1531 return DP_TRAIN_PRE_EMPHASIS_3_5; 1532 case DP_TRAIN_VOLTAGE_SWING_1200: 1533 default: 1534 return DP_TRAIN_PRE_EMPHASIS_0; 1535 } 1536 } 1537} 1538 1539static void 1540intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1541{ 1542 uint8_t v = 0; 1543 uint8_t p = 0; 1544 int lane; 1545 uint8_t voltage_max; 1546 uint8_t preemph_max; 1547 1548 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1549 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); 1550 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); 1551 1552 if (this_v > v) 1553 v = this_v; 1554 if (this_p > p) 1555 p = this_p; 1556 } 1557 1558 voltage_max = intel_dp_voltage_max(intel_dp); 1559 if (v >= voltage_max) 1560 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 1561 1562 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 1563 if (p >= preemph_max) 1564 p = preemph_max | 
DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1565 1566 for (lane = 0; lane < 4; lane++) 1567 intel_dp->train_set[lane] = v | p; 1568} 1569 1570static uint32_t 1571intel_gen4_signal_levels(uint8_t train_set) 1572{ 1573 uint32_t signal_levels = 0; 1574 1575 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1576 case DP_TRAIN_VOLTAGE_SWING_400: 1577 default: 1578 signal_levels |= DP_VOLTAGE_0_4; 1579 break; 1580 case DP_TRAIN_VOLTAGE_SWING_600: 1581 signal_levels |= DP_VOLTAGE_0_6; 1582 break; 1583 case DP_TRAIN_VOLTAGE_SWING_800: 1584 signal_levels |= DP_VOLTAGE_0_8; 1585 break; 1586 case DP_TRAIN_VOLTAGE_SWING_1200: 1587 signal_levels |= DP_VOLTAGE_1_2; 1588 break; 1589 } 1590 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1591 case DP_TRAIN_PRE_EMPHASIS_0: 1592 default: 1593 signal_levels |= DP_PRE_EMPHASIS_0; 1594 break; 1595 case DP_TRAIN_PRE_EMPHASIS_3_5: 1596 signal_levels |= DP_PRE_EMPHASIS_3_5; 1597 break; 1598 case DP_TRAIN_PRE_EMPHASIS_6: 1599 signal_levels |= DP_PRE_EMPHASIS_6; 1600 break; 1601 case DP_TRAIN_PRE_EMPHASIS_9_5: 1602 signal_levels |= DP_PRE_EMPHASIS_9_5; 1603 break; 1604 } 1605 return signal_levels; 1606} 1607 1608/* Gen6's DP voltage swing and pre-emphasis control */ 1609static uint32_t 1610intel_gen6_edp_signal_levels(uint8_t train_set) 1611{ 1612 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1613 DP_TRAIN_PRE_EMPHASIS_MASK); 1614 switch (signal_levels) { 1615 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1616 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1617 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1618 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1619 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 1620 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1621 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1622 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 1623 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1624 case DP_TRAIN_VOLTAGE_SWING_800 | 
DP_TRAIN_PRE_EMPHASIS_3_5: 1625 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 1626 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1627 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 1628 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 1629 default: 1630 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1631 "0x%x\n", signal_levels); 1632 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1633 } 1634} 1635 1636/* Gen7's DP voltage swing and pre-emphasis control */ 1637static uint32_t 1638intel_gen7_edp_signal_levels(uint8_t train_set) 1639{ 1640 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1641 DP_TRAIN_PRE_EMPHASIS_MASK); 1642 switch (signal_levels) { 1643 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1644 return EDP_LINK_TRAIN_400MV_0DB_IVB; 1645 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1646 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 1647 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1648 return EDP_LINK_TRAIN_400MV_6DB_IVB; 1649 1650 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1651 return EDP_LINK_TRAIN_600MV_0DB_IVB; 1652 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1653 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 1654 1655 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1656 return EDP_LINK_TRAIN_800MV_0DB_IVB; 1657 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1658 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 1659 1660 default: 1661 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1662 "0x%x\n", signal_levels); 1663 return EDP_LINK_TRAIN_500MV_0DB_IVB; 1664 } 1665} 1666 1667/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1668static uint32_t 1669intel_hsw_signal_levels(uint8_t train_set) 1670{ 1671 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1672 DP_TRAIN_PRE_EMPHASIS_MASK); 1673 switch (signal_levels) { 1674 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1675 return 
DDI_BUF_EMP_400MV_0DB_HSW; 1676 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1677 return DDI_BUF_EMP_400MV_3_5DB_HSW; 1678 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1679 return DDI_BUF_EMP_400MV_6DB_HSW; 1680 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: 1681 return DDI_BUF_EMP_400MV_9_5DB_HSW; 1682 1683 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1684 return DDI_BUF_EMP_600MV_0DB_HSW; 1685 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1686 return DDI_BUF_EMP_600MV_3_5DB_HSW; 1687 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1688 return DDI_BUF_EMP_600MV_6DB_HSW; 1689 1690 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1691 return DDI_BUF_EMP_800MV_0DB_HSW; 1692 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1693 return DDI_BUF_EMP_800MV_3_5DB_HSW; 1694 default: 1695 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1696 "0x%x\n", signal_levels); 1697 return DDI_BUF_EMP_400MV_0DB_HSW; 1698 } 1699} 1700 1701/* Properly updates "DP" with the correct signal levels. 
*/ 1702static void 1703intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) 1704{ 1705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1706 struct drm_device *dev = intel_dig_port->base.base.dev; 1707 uint32_t signal_levels, mask; 1708 uint8_t train_set = intel_dp->train_set[0]; 1709 1710 if (HAS_DDI(dev)) { 1711 signal_levels = intel_hsw_signal_levels(train_set); 1712 mask = DDI_BUF_EMP_MASK; 1713 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1714 signal_levels = intel_gen7_edp_signal_levels(train_set); 1715 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 1716 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1717 signal_levels = intel_gen6_edp_signal_levels(train_set); 1718 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 1719 } else { 1720 signal_levels = intel_gen4_signal_levels(train_set); 1721 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; 1722 } 1723 1724 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); 1725 1726 *DP = (*DP & ~mask) | signal_levels; 1727} 1728 1729static bool 1730intel_dp_set_link_train(struct intel_dp *intel_dp, 1731 uint32_t dp_reg_value, 1732 uint8_t dp_train_pat) 1733{ 1734 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1735 struct drm_device *dev = intel_dig_port->base.base.dev; 1736 struct drm_i915_private *dev_priv = dev->dev_private; 1737 enum port port = intel_dig_port->port; 1738 int ret; 1739 uint32_t temp; 1740 1741 if (HAS_DDI(dev)) { 1742 temp = I915_READ(DP_TP_CTL(port)); 1743 1744 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 1745 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 1746 else 1747 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 1748 1749 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1750 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1751 case DP_TRAINING_PATTERN_DISABLE: 1752 1753 if (port != PORT_A) { 1754 temp |= DP_TP_CTL_LINK_TRAIN_IDLE; 1755 I915_WRITE(DP_TP_CTL(port), temp); 1756 1757 if (wait_for((I915_READ(DP_TP_STATUS(port)) & 1758 
DP_TP_STATUS_IDLE_DONE), 1)) 1759 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 1760 1761 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1762 } 1763 1764 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1765 1766 break; 1767 case DP_TRAINING_PATTERN_1: 1768 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 1769 break; 1770 case DP_TRAINING_PATTERN_2: 1771 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 1772 break; 1773 case DP_TRAINING_PATTERN_3: 1774 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 1775 break; 1776 } 1777 I915_WRITE(DP_TP_CTL(port), temp); 1778 1779 } else if (HAS_PCH_CPT(dev) && 1780 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1781 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 1782 1783 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1784 case DP_TRAINING_PATTERN_DISABLE: 1785 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 1786 break; 1787 case DP_TRAINING_PATTERN_1: 1788 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 1789 break; 1790 case DP_TRAINING_PATTERN_2: 1791 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1792 break; 1793 case DP_TRAINING_PATTERN_3: 1794 DRM_ERROR("DP training pattern 3 not supported\n"); 1795 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1796 break; 1797 } 1798 1799 } else { 1800 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 1801 1802 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1803 case DP_TRAINING_PATTERN_DISABLE: 1804 dp_reg_value |= DP_LINK_TRAIN_OFF; 1805 break; 1806 case DP_TRAINING_PATTERN_1: 1807 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 1808 break; 1809 case DP_TRAINING_PATTERN_2: 1810 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1811 break; 1812 case DP_TRAINING_PATTERN_3: 1813 DRM_ERROR("DP training pattern 3 not supported\n"); 1814 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1815 break; 1816 } 1817 } 1818 1819 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1820 POSTING_READ(intel_dp->output_reg); 1821 1822 intel_dp_aux_native_write_1(intel_dp, 1823 DP_TRAINING_PATTERN_SET, 1824 dp_train_pat); 1825 1826 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != 1827 DP_TRAINING_PATTERN_DISABLE) { 1828 ret = 
intel_dp_aux_native_write(intel_dp, 1829 DP_TRAINING_LANE0_SET, 1830 intel_dp->train_set, 1831 intel_dp->lane_count); 1832 if (ret != intel_dp->lane_count) 1833 return false; 1834 } 1835 1836 return true; 1837} 1838 1839/* Enable corresponding port and start training pattern 1 */ 1840void 1841intel_dp_start_link_train(struct intel_dp *intel_dp) 1842{ 1843 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; 1844 struct drm_device *dev = encoder->dev; 1845 int i; 1846 uint8_t voltage; 1847 bool clock_recovery = false; 1848 int voltage_tries, loop_tries; 1849 uint32_t DP = intel_dp->DP; 1850 1851 if (HAS_DDI(dev)) 1852 intel_ddi_prepare_link_retrain(encoder); 1853 1854 /* Write the link configuration data */ 1855 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1856 intel_dp->link_configuration, 1857 DP_LINK_CONFIGURATION_SIZE); 1858 1859 DP |= DP_PORT_EN; 1860 1861 memset(intel_dp->train_set, 0, 4); 1862 voltage = 0xff; 1863 voltage_tries = 0; 1864 loop_tries = 0; 1865 clock_recovery = false; 1866 for (;;) { 1867 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1868 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1869 1870 intel_dp_set_signal_levels(intel_dp, &DP); 1871 1872 /* Set training pattern 1 */ 1873 if (!intel_dp_set_link_train(intel_dp, DP, 1874 DP_TRAINING_PATTERN_1 | 1875 DP_LINK_SCRAMBLING_DISABLE)) 1876 break; 1877 1878 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 1879 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1880 DRM_ERROR("failed to get link status\n"); 1881 break; 1882 } 1883 1884 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1885 DRM_DEBUG_KMS("clock recovery OK\n"); 1886 clock_recovery = true; 1887 break; 1888 } 1889 1890 /* Check to see if we've tried the max voltage */ 1891 for (i = 0; i < intel_dp->lane_count; i++) 1892 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1893 break; 1894 if (i == intel_dp->lane_count && voltage_tries == 5) { 1895 
++loop_tries; 1896 if (loop_tries == 5) { 1897 DRM_DEBUG_KMS("too many full retries, give up\n"); 1898 break; 1899 } 1900 memset(intel_dp->train_set, 0, 4); 1901 voltage_tries = 0; 1902 continue; 1903 } 1904 1905 /* Check to see if we've tried the same voltage 5 times */ 1906 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1907 ++voltage_tries; 1908 if (voltage_tries == 5) { 1909 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 1910 break; 1911 } 1912 } else 1913 voltage_tries = 0; 1914 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1915 1916 /* Compute new intel_dp->train_set as requested by target */ 1917 intel_get_adjust_train(intel_dp, link_status); 1918 } 1919 1920 intel_dp->DP = DP; 1921} 1922 1923void 1924intel_dp_complete_link_train(struct intel_dp *intel_dp) 1925{ 1926 bool channel_eq = false; 1927 int tries, cr_tries; 1928 uint32_t DP = intel_dp->DP; 1929 1930 /* channel equalization */ 1931 tries = 0; 1932 cr_tries = 0; 1933 channel_eq = false; 1934 for (;;) { 1935 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1936 1937 if (cr_tries > 5) { 1938 DRM_ERROR("failed to train DP, aborting\n"); 1939 intel_dp_link_down(intel_dp); 1940 break; 1941 } 1942 1943 intel_dp_set_signal_levels(intel_dp, &DP); 1944 1945 /* channel eq pattern */ 1946 if (!intel_dp_set_link_train(intel_dp, DP, 1947 DP_TRAINING_PATTERN_2 | 1948 DP_LINK_SCRAMBLING_DISABLE)) 1949 break; 1950 1951 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 1952 if (!intel_dp_get_link_status(intel_dp, link_status)) 1953 break; 1954 1955 /* Make sure clock is still ok */ 1956 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1957 intel_dp_start_link_train(intel_dp); 1958 cr_tries++; 1959 continue; 1960 } 1961 1962 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 1963 channel_eq = true; 1964 break; 1965 } 1966 1967 /* Try 5 times, then try clock recovery if that fails */ 1968 if (tries > 5) { 1969 intel_dp_link_down(intel_dp); 
1970 intel_dp_start_link_train(intel_dp); 1971 tries = 0; 1972 cr_tries++; 1973 continue; 1974 } 1975 1976 /* Compute new intel_dp->train_set as requested by target */ 1977 intel_get_adjust_train(intel_dp, link_status); 1978 ++tries; 1979 } 1980 1981 if (channel_eq) 1982 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); 1983 1984 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1985} 1986 1987static void 1988intel_dp_link_down(struct intel_dp *intel_dp) 1989{ 1990 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1991 struct drm_device *dev = intel_dig_port->base.base.dev; 1992 struct drm_i915_private *dev_priv = dev->dev_private; 1993 struct intel_crtc *intel_crtc = 1994 to_intel_crtc(intel_dig_port->base.base.crtc); 1995 uint32_t DP = intel_dp->DP; 1996 1997 /* 1998 * DDI code has a strict mode set sequence and we should try to respect 1999 * it, otherwise we might hang the machine in many different ways. So we 2000 * really should be disabling the port only on a complete crtc_disable 2001 * sequence. This function is just called under two conditions on DDI 2002 * code: 2003 * - Link train failed while doing crtc_enable, and on this case we 2004 * really should respect the mode set sequence and wait for a 2005 * crtc_disable. 2006 * - Someone turned the monitor off and intel_dp_check_link_status 2007 * called us. We don't need to disable the whole port on this case, so 2008 * when someone turns the monitor on again, 2009 * intel_ddi_prepare_link_retrain will take care of redoing the link 2010 * train. 
2011 */ 2012 if (HAS_DDI(dev)) 2013 return; 2014 2015 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 2016 return; 2017 2018 DRM_DEBUG_KMS("\n"); 2019 2020 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 2021 DP &= ~DP_LINK_TRAIN_MASK_CPT; 2022 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 2023 } else { 2024 DP &= ~DP_LINK_TRAIN_MASK; 2025 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 2026 } 2027 POSTING_READ(intel_dp->output_reg); 2028 2029 /* We don't really know why we're doing this */ 2030 intel_wait_for_vblank(dev, intel_crtc->pipe); 2031 2032 if (HAS_PCH_IBX(dev) && 2033 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 2034 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 2035 2036 /* Hardware workaround: leaving our transcoder select 2037 * set to transcoder B while it's off will prevent the 2038 * corresponding HDMI output on transcoder A. 2039 * 2040 * Combine this with another hardware workaround: 2041 * transcoder select bit can only be cleared while the 2042 * port is enabled. 2043 */ 2044 DP &= ~DP_PIPEB_SELECT; 2045 I915_WRITE(intel_dp->output_reg, DP); 2046 2047 /* Changes to enable or select take place the vblank 2048 * after being written. 2049 */ 2050 if (WARN_ON(crtc == NULL)) { 2051 /* We should never try to disable a port without a crtc 2052 * attached. For paranoia keep the code around for a 2053 * bit. 
 */
		POSTING_READ(intel_dp->output_reg);
		msleep(50);
	} else
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Kill audio first, then drop DP_PORT_EN in the same write; flush
	 * with a posting read and honour the panel power-down delay. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/*
 * Read and cache the sink's DPCD receiver capability block, plus per-port
 * downstream info when the sink advertises a downstream (branch) port.
 *
 * Returns false if the AUX transfer fails, no DPCD is present (rev == 0),
 * or the downstream port info cannot be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	/* 3 chars per byte of hex dump ("xx " / "xx\0") */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/*
 * Debug helper: read and log the sink and branch OUI registers when the
 * sink advertises OUI support. Requires panel VDD for the AUX reads on eDP.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/*
 * Fetch the sink's DEVICE_SERVICE_IRQ_VECTOR over AUX.
 * Returns true on success with the vector stored in *sink_irq_vector.
 */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

/* Respond to a sink automated-test request; we don't implement any tests. */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to check while the output is inactive. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if channel equalization has been lost. */
	if
 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}

/*
 * Determine connector status from the sink's DPCD: native sinks are
 * "connected" once the DPCD reads back; branch devices are probed via
 * SINK_COUNT (when HPD-aware) or a gentle DDC poke otherwise.
 */
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* PCH-split platforms: check the digital port live state, then the DPCD. */
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/* G4x platforms: check the hotplug live-status bit, then the DPCD. */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	switch (intel_dig_port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/*
 * Return a freshly-allocated copy of the cached EDID (caller frees), or
 * fall back to a live DDC read. NULL on allocation failure or invalid
 * cached EDID.
 */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		struct edid *edid;
		int size;

		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		/* base block plus extension blocks */
		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
		edid = kmalloc(size, GFP_KERNEL);
		if (!edid)
			return NULL;

		memcpy(edid, intel_connector->edid, size);
		return edid;
	}

	return drm_get_edid(connector, adapter);
}

/* Populate probed modes from the cached EDID, or via a live DDC read. */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return 0;

		return intel_connector_update_modes(connector,
						    intel_connector->edid);
	}

	return intel_ddc_get_modes(connector, adapter);
}

/* drm_connector_funcs.detect: probe the port and refresh audio state. */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	/* Platform-specific live-status probe. */
	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Forced audio overrides EDID-based detection. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	/* Don't clobber the eDP type once set. */
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	return connector_status_connected;
}

/*
 * drm_connector_helper_funcs.get_modes: EDID modes first, then the eDP
 * fixed panel mode as a fallback. Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret)
		return ret;

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

/* Re-read the EDID to determine whether the sink supports audio. */
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

/*
 * drm_connector_funcs.set_property: handle the force-audio, broadcast-RGB
 * and (eDP only) scaling-mode properties. Returns 0 on success or a
 * negative error code; a successful change triggers a modeset restore.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}
		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}

/* Free the connector, any cached EDID copy, and eDP panel/backlight state. */
static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* edid may be a real cached copy or an ERR_PTR sentinel */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	if (is_edp(intel_dp)) {
		intel_panel_destroy_backlight(dev);
		intel_panel_fini(&intel_connector->panel);
	}

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

/* Tear down the encoder: i2c adapter, pending VDD work, and the port. */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* finish any outstanding delayed VDD-off before freeing */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		ironlake_panel_vdd_off_sync(intel_dp);
	}
	kfree(intel_dig_port);
}

static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_set = intel_dp_mode_set,
};

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

/* Hotplug handler: revalidate (and retrain if needed) the link. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	/* no DP/eDP encoder on this crtc */
	return -1;
}

/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	if (!dev_priv->child_dev_num)
		return false;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		p_child = dev_priv->child_dev + i;

		if (p_child->dvo_port == PORT_IDPD &&
		    p_child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}

/* Attach the standard DP connector properties (plus scaling mode on eDP). */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}

/*
 * Compute the eDP panel power sequencing delays by taking the max of the
 * current register values and the VBT, falling back to the eDP spec limits
 * when both are unset. The chosen delays are stored on @intel_dp (in ms)
 * and, if @out is non-NULL, returned in hw units for later programming.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(dev_priv);
	I915_WRITE(PCH_PP_CONTROL, pp);

	pp_on = I915_READ(PCH_PP_ON_DELAYS);
	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
	pp_div = I915_READ(PCH_PP_DIVISOR);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* hw units are 100usec; the delays on intel_dp are in ms */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}

/*
 * Program the panel power sequencer registers from the delays computed by
 * intel_dp_init_panel_power_sequencer(), including the pp clock divisor
 * and (on IBX/CPT) the port selection bits.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div;

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
			<< PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (is_cpu_edp(intel_dp))
			pp_on |= PANEL_POWER_PORT_DP_A;
		else
			pp_on |= PANEL_POWER_PORT_DP_D;
	}

	I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
	I915_WRITE(PCH_PP_DIVISOR, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(PCH_PP_ON_DELAYS),
		      I915_READ(PCH_PP_OFF_DELAYS),
		      I915_READ(PCH_PP_DIVISOR));
}

/*
 * Initialise the DRM connector for a DP/eDP digital port: connector type,
 * hw-state hooks, AUX channel register, DDC bus, and — for eDP — the panel
 * power sequencer, cached DPCD/EDID, fixed mode and backlight.
 */
void
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
		 * rewrite it.
		 */
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* AUX control register defaults to output_reg + 0x10; DDI platforms
	 * use dedicated per-port registers instead. */
	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache DPCD and EDID for edp. */
	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_encoder->base);
			intel_dp_destroy(connector);
			return;
		}

		/* We now know it's not a ghost, init power sequence regs. */
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);

		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		/* cache the EDID (or an ERR_PTR marker) on the connector */
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}

/*
 * Allocate and register the DP digital port and its connector for the
 * given output register and port. Frees everything on allocation failure.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->enable = intel_enable_dp;
	intel_encoder->pre_enable = intel_pre_enable_dp;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->post_disable = intel_post_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	intel_encoder->cloneable = false;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	intel_dp_init_connector(intel_dig_port, intel_connector);
}