intel_dp.c revision ad1c0b1974c31f16407f983b7e6ea3511ec2a726
1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/i2c.h> 29#include <linux/slab.h> 30#include <linux/export.h> 31#include <drm/drmP.h> 32#include <drm/drm_crtc.h> 33#include <drm/drm_crtc_helper.h> 34#include <drm/drm_edid.h> 35#include "intel_drv.h" 36#include <drm/i915_drm.h> 37#include "i915_drv.h" 38 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40 41/** 42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 43 * @intel_dp: DP struct 44 * 45 * If a CPU or PCH DP output is attached to an eDP panel, this function 46 * will return true, and false otherwise. 
47 */ 48static bool is_edp(struct intel_dp *intel_dp) 49{ 50 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 51 52 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 53} 54 55/** 56 * is_pch_edp - is the port on the PCH and attached to an eDP panel? 57 * @intel_dp: DP struct 58 * 59 * Returns true if the given DP struct corresponds to a PCH DP port attached 60 * to an eDP panel, false otherwise. Helpful for determining whether we 61 * may need FDI resources for a given DP output or not. 62 */ 63static bool is_pch_edp(struct intel_dp *intel_dp) 64{ 65 return intel_dp->is_pch_edp; 66} 67 68/** 69 * is_cpu_edp - is the port on the CPU and attached to an eDP panel? 70 * @intel_dp: DP struct 71 * 72 * Returns true if the given DP struct corresponds to a CPU eDP port. 73 */ 74static bool is_cpu_edp(struct intel_dp *intel_dp) 75{ 76 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 77} 78 79static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) 80{ 81 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 82 83 return intel_dig_port->base.base.dev; 84} 85 86static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 87{ 88 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 89} 90 91/** 92 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? 93 * @encoder: DRM encoder 94 * 95 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed 96 * by intel_display.c. 
97 */ 98bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) 99{ 100 struct intel_dp *intel_dp; 101 102 if (!encoder) 103 return false; 104 105 intel_dp = enc_to_intel_dp(encoder); 106 107 return is_pch_edp(intel_dp); 108} 109 110static void intel_dp_link_down(struct intel_dp *intel_dp); 111 112void 113intel_edp_link_config(struct intel_encoder *intel_encoder, 114 int *lane_num, int *link_bw) 115{ 116 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 117 118 *lane_num = intel_dp->lane_count; 119 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 120} 121 122int 123intel_edp_target_clock(struct intel_encoder *intel_encoder, 124 struct drm_display_mode *mode) 125{ 126 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 127 struct intel_connector *intel_connector = intel_dp->attached_connector; 128 129 if (intel_connector->panel.fixed_mode) 130 return intel_connector->panel.fixed_mode->clock; 131 else 132 return mode->clock; 133} 134 135static int 136intel_dp_max_link_bw(struct intel_dp *intel_dp) 137{ 138 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 139 140 switch (max_link_bw) { 141 case DP_LINK_BW_1_62: 142 case DP_LINK_BW_2_7: 143 break; 144 default: 145 max_link_bw = DP_LINK_BW_1_62; 146 break; 147 } 148 return max_link_bw; 149} 150 151/* 152 * The units on the numbers in the next two are... bizarre. Examples will 153 * make it clearer; this one parallels an example in the eDP spec. 154 * 155 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 156 * 157 * 270000 * 1 * 8 / 10 == 216000 158 * 159 * The actual data capacity of that configuration is 2.16Gbit/s, so the 160 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 161 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 162 * 119000. At 18bpp that's 2142000 kilobits per second. 
163 * 164 * Thus the strange-looking division by 10 in intel_dp_link_required, to 165 * get the result in decakilobits instead of kilobits. 166 */ 167 168static int 169intel_dp_link_required(int pixel_clock, int bpp) 170{ 171 return (pixel_clock * bpp + 9) / 10; 172} 173 174static int 175intel_dp_max_data_rate(int max_link_clock, int max_lanes) 176{ 177 return (max_link_clock * max_lanes * 8) / 10; 178} 179 180static bool 181intel_dp_adjust_dithering(struct intel_dp *intel_dp, 182 struct drm_display_mode *mode, 183 bool adjust_mode) 184{ 185 int max_link_clock = 186 drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 187 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 188 int max_rate, mode_rate; 189 190 mode_rate = intel_dp_link_required(mode->clock, 24); 191 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 192 193 if (mode_rate > max_rate) { 194 mode_rate = intel_dp_link_required(mode->clock, 18); 195 if (mode_rate > max_rate) 196 return false; 197 198 if (adjust_mode) 199 mode->private_flags 200 |= INTEL_MODE_DP_FORCE_6BPC; 201 202 return true; 203 } 204 205 return true; 206} 207 208static int 209intel_dp_mode_valid(struct drm_connector *connector, 210 struct drm_display_mode *mode) 211{ 212 struct intel_dp *intel_dp = intel_attached_dp(connector); 213 struct intel_connector *intel_connector = to_intel_connector(connector); 214 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 215 216 if (is_edp(intel_dp) && fixed_mode) { 217 if (mode->hdisplay > fixed_mode->hdisplay) 218 return MODE_PANEL; 219 220 if (mode->vdisplay > fixed_mode->vdisplay) 221 return MODE_PANEL; 222 } 223 224 if (!intel_dp_adjust_dithering(intel_dp, mode, false)) 225 return MODE_CLOCK_HIGH; 226 227 if (mode->clock < 10000) 228 return MODE_CLOCK_LOW; 229 230 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 231 return MODE_H_ILLEGAL; 232 233 return MODE_OK; 234} 235 236static uint32_t 237pack_aux(uint8_t *src, int src_bytes) 238{ 239 int i; 240 uint32_t 
v = 0; 241 242 if (src_bytes > 4) 243 src_bytes = 4; 244 for (i = 0; i < src_bytes; i++) 245 v |= ((uint32_t) src[i]) << ((3-i) * 8); 246 return v; 247} 248 249static void 250unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 251{ 252 int i; 253 if (dst_bytes > 4) 254 dst_bytes = 4; 255 for (i = 0; i < dst_bytes; i++) 256 dst[i] = src >> ((3-i) * 8); 257} 258 259/* hrawclock is 1/4 the FSB frequency */ 260static int 261intel_hrawclk(struct drm_device *dev) 262{ 263 struct drm_i915_private *dev_priv = dev->dev_private; 264 uint32_t clkcfg; 265 266 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 267 if (IS_VALLEYVIEW(dev)) 268 return 200; 269 270 clkcfg = I915_READ(CLKCFG); 271 switch (clkcfg & CLKCFG_FSB_MASK) { 272 case CLKCFG_FSB_400: 273 return 100; 274 case CLKCFG_FSB_533: 275 return 133; 276 case CLKCFG_FSB_667: 277 return 166; 278 case CLKCFG_FSB_800: 279 return 200; 280 case CLKCFG_FSB_1067: 281 return 266; 282 case CLKCFG_FSB_1333: 283 return 333; 284 /* these two are just a guess; one of them might be right */ 285 case CLKCFG_FSB_1600: 286 case CLKCFG_FSB_1600_ALT: 287 return 400; 288 default: 289 return 133; 290 } 291} 292 293static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 294{ 295 struct drm_device *dev = intel_dp_to_dev(intel_dp); 296 struct drm_i915_private *dev_priv = dev->dev_private; 297 298 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; 299} 300 301static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 302{ 303 struct drm_device *dev = intel_dp_to_dev(intel_dp); 304 struct drm_i915_private *dev_priv = dev->dev_private; 305 306 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; 307} 308 309static void 310intel_dp_check_edp(struct intel_dp *intel_dp) 311{ 312 struct drm_device *dev = intel_dp_to_dev(intel_dp); 313 struct drm_i915_private *dev_priv = dev->dev_private; 314 315 if (!is_edp(intel_dp)) 316 return; 317 if (!ironlake_edp_have_panel_power(intel_dp) && 
	    !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

/*
 * Wait for the AUX channel to finish its current transaction, either by
 * sleeping on the GMBUS/AUX wait queue (interrupt-driven) or by polling
 * the SEND_BUSY bit.  Returns the final channel status register value.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

/*
 * Perform one raw AUX channel transaction: load @send into the data
 * registers, kick off the transfer, and unpack up to @recv_size reply
 * bytes into @recv.  Returns the number of bytes received, or a negative
 * errno on failure.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4; /* data registers follow the ctl reg */
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (HAS_DDI(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec (we retry up to 5) */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp
			  *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	/* A native AUX write carries at most 16 payload bytes. */
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100); /* sink busy: back off briefly and retry */
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1; /* one extra byte for the reply header */

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

/*
 * I2C-over-AUX adapter callback: transfer a single byte in @mode
 * (read / write / address-only), retrying on DEFER replies.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) stays set until an I2C STOP. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

/*
 * Register the DP AUX channel as an i2c adapter so EDID/DDC traffic can
 * go over I2C-over-AUX.  VDD is forced on around the bus registration
 * because registration probes the (eDP) sink over AUX.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

/*
 * Compute the link configuration (bw code, lane count, adjusted mode
 * clock, bpp) needed to carry @mode, trying the smallest link first.
 * Returns false if no supported link configuration can carry the mode.
 */
bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;

	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;

	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Walk links smallest-first and take the first one with capacity. */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_bw_clock =
				drm_dp_bw_code_to_link_rate(bws[clock]);
			int link_avail = intel_dp_max_data_rate(link_bw_clock,
								lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = link_bw_clock;
				DRM_DEBUG_KMS("DP link bw %02x lane "
					      "count %d clock %d bpp %d\n",
					      intel_dp->link_bw, intel_dp->lane_count,
					      adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}

void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_link_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_link_compute_m_n(intel_crtc->bpp, lane_count,
			       mode->clock, adjusted_mode->clock, &m_n);

	/* The M/N registers live in different places per platform generation. */
	if (HAS_DDI(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

/* Seed the DPCD link configuration block from the chosen bw/lane count. */
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

/* Select the CPU eDP PLL frequency (160MHz vs 270MHz) for @clock. */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style training bits plus PLL freq select */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		/* IBX PCH / SNB CPU style */
		if (!HAS_PCH_SPLIT(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		/* CPT PCH: the interesting bits live in TRANS_DP_CTL */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}

/* Panel power sequencer state matchers for ironlake_wait_panel_status(). */
#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

/*
 * Poll the PCH panel power status register until (status & mask) == value,
 * logging an error if that doesn't happen within 5 seconds.
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power cycle\n"); 1017 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 1018} 1019 1020 1021/* Read the current pp_control value, unlocking the register if it 1022 * is locked 1023 */ 1024 1025static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) 1026{ 1027 u32 control = I915_READ(PCH_PP_CONTROL); 1028 1029 control &= ~PANEL_UNLOCK_MASK; 1030 control |= PANEL_UNLOCK_REGS; 1031 return control; 1032} 1033 1034void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 1035{ 1036 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1037 struct drm_i915_private *dev_priv = dev->dev_private; 1038 u32 pp; 1039 1040 if (!is_edp(intel_dp)) 1041 return; 1042 DRM_DEBUG_KMS("Turn eDP VDD on\n"); 1043 1044 WARN(intel_dp->want_panel_vdd, 1045 "eDP VDD already requested on\n"); 1046 1047 intel_dp->want_panel_vdd = true; 1048 1049 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1050 DRM_DEBUG_KMS("eDP VDD already on\n"); 1051 return; 1052 } 1053 1054 if (!ironlake_edp_have_panel_power(intel_dp)) 1055 ironlake_wait_panel_power_cycle(intel_dp); 1056 1057 pp = ironlake_get_pp_control(dev_priv); 1058 pp |= EDP_FORCE_VDD; 1059 I915_WRITE(PCH_PP_CONTROL, pp); 1060 POSTING_READ(PCH_PP_CONTROL); 1061 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1062 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1063 1064 /* 1065 * If the panel wasn't on, delay before accessing aux channel 1066 */ 1067 if (!ironlake_edp_have_panel_power(intel_dp)) { 1068 DRM_DEBUG_KMS("eDP was not running\n"); 1069 msleep(intel_dp->panel_power_up_delay); 1070 } 1071} 1072 1073static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1074{ 1075 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1076 struct drm_i915_private *dev_priv = dev->dev_private; 1077 u32 pp; 1078 1079 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1080 1081 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1082 pp = 
		     ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Delayed-work callback that drops VDD after the grace period queued by
 * ironlake_edp_panel_vdd_off(sync == false). */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/* Release a VDD reference taken with ironlake_edp_panel_vdd_on().  With
 * @sync the rail is dropped immediately; otherwise it is kept up for a
 * while so back-to-back AUX users avoid a power cycle.  No-op for non-eDP. */
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/* Run the power sequencer to turn the eDP panel on and wait until it
 * reports the panel powered.  No-op for non-eDP ports. */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

/* Run the power sequencer to turn the eDP panel off.  Callers must hold a
 * VDD reference (see the WARN) so AUX stays usable through the sequence. */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work.
	 */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* The write above already cleared EDP_FORCE_VDD, so just drop the
	 * software request flag to match. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

/* Enable the eDP backlight after the mandated post-power-on delay.
 * No-op for non-eDP ports. */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_panel_enable_backlight(dev, pipe);
}

/* Disable the eDP backlight, then wait the mandated delay before the
 * panel itself may be powered down.  No-op for non-eDP ports. */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

/* Enable the CPU eDP PLL (DP_A).  The pipe must still be disabled. */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

/* Disable the CPU eDP PLL (DP_A).  The pipe must already be disabled. */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail).
	 */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

/* Read back the hardware state of this DP port: returns whether the port
 * is enabled and, if so, stores the pipe driving it in *@pipe. */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* On CPT the pipe association lives in the transcoder's
		 * TRANS_DP_CTL port-select field, so scan all pipes. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

/* Encoder disable hook: backlight off, panel off, and (for PCH ports)
 * link down.  CPU eDP link-down is deferred to post_disable below. */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	/* NOTE(review): the sink is written to D0 (DPMS_ON) on the disable
	 * path — presumably to keep AUX responsive through the panel-off
	 * sequence; confirm against the power sequencing requirements. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}

/* Runs after the pipe is shut down: take the CPU eDP link and PLL down. */
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
	}
}

/* Encoder enable hook: wake the sink, train the link, power the panel and
 * finally light the backlight. */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}

/* Runs before the pipe comes up: CPU eDP needs its PLL running first. */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}

/*
 * Native read with retry
 * for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

/* Highest voltage-swing level this source can drive for the given port. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

/* Highest pre-emphasis level supported at @voltage_swing on this source. */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_DDI(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

/* Fold the per-lane adjustment requests from @link_status into a single
 * (max) voltage-swing/pre-emphasis pair, clamp to source limits, and store
 * it into intel_dp->train_set[] for all four lanes. */
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max |
		    DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/* Translate a DPCD train_set byte into gen4 DP port register bits. */
static uint32_t
intel_gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}

/* Properly updates "DP" with the correct signal levels.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}

/* Program the requested training pattern into the port (DDI, CPT or
 * legacy flavour) and mirror it to the sink via DPCD, including the
 * per-lane train_set when a pattern is active.  Returns false if the
 * DPCD lane-set write fails. */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;
	uint32_t temp;

	if (HAS_DDI(dev)) {
		temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:

			if (port != PORT_A) {
				/* Send idle pattern and wait for it to
				 * complete before switching to normal
				 * pixel output. */
				temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
				I915_WRITE(DP_TP_CTL(port), temp);

				if (wait_for((I915_READ(DP_TP_STATUS(port)) &
					      DP_TP_STATUS_IDLE_DONE), 1))
					DRM_ERROR("Timed out waiting for DP idle patterns\n");

				temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			}

			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Tell the sink which pattern we are now driving. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count && voltage_tries == 5) {
++loop_tries; 1892 if (loop_tries == 5) { 1893 DRM_DEBUG_KMS("too many full retries, give up\n"); 1894 break; 1895 } 1896 memset(intel_dp->train_set, 0, 4); 1897 voltage_tries = 0; 1898 continue; 1899 } 1900 1901 /* Check to see if we've tried the same voltage 5 times */ 1902 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1903 ++voltage_tries; 1904 if (voltage_tries == 5) { 1905 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 1906 break; 1907 } 1908 } else 1909 voltage_tries = 0; 1910 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1911 1912 /* Compute new intel_dp->train_set as requested by target */ 1913 intel_get_adjust_train(intel_dp, link_status); 1914 } 1915 1916 intel_dp->DP = DP; 1917} 1918 1919void 1920intel_dp_complete_link_train(struct intel_dp *intel_dp) 1921{ 1922 bool channel_eq = false; 1923 int tries, cr_tries; 1924 uint32_t DP = intel_dp->DP; 1925 1926 /* channel equalization */ 1927 tries = 0; 1928 cr_tries = 0; 1929 channel_eq = false; 1930 for (;;) { 1931 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1932 1933 if (cr_tries > 5) { 1934 DRM_ERROR("failed to train DP, aborting\n"); 1935 intel_dp_link_down(intel_dp); 1936 break; 1937 } 1938 1939 intel_dp_set_signal_levels(intel_dp, &DP); 1940 1941 /* channel eq pattern */ 1942 if (!intel_dp_set_link_train(intel_dp, DP, 1943 DP_TRAINING_PATTERN_2 | 1944 DP_LINK_SCRAMBLING_DISABLE)) 1945 break; 1946 1947 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 1948 if (!intel_dp_get_link_status(intel_dp, link_status)) 1949 break; 1950 1951 /* Make sure clock is still ok */ 1952 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1953 intel_dp_start_link_train(intel_dp); 1954 cr_tries++; 1955 continue; 1956 } 1957 1958 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 1959 channel_eq = true; 1960 break; 1961 } 1962 1963 /* Try 5 times, then try clock recovery if that fails */ 1964 if (tries > 5) { 1965 intel_dp_link_down(intel_dp); 
1966 intel_dp_start_link_train(intel_dp); 1967 tries = 0; 1968 cr_tries++; 1969 continue; 1970 } 1971 1972 /* Compute new intel_dp->train_set as requested by target */ 1973 intel_get_adjust_train(intel_dp, link_status); 1974 ++tries; 1975 } 1976 1977 if (channel_eq) 1978 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); 1979 1980 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1981} 1982 1983static void 1984intel_dp_link_down(struct intel_dp *intel_dp) 1985{ 1986 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1987 struct drm_device *dev = intel_dig_port->base.base.dev; 1988 struct drm_i915_private *dev_priv = dev->dev_private; 1989 struct intel_crtc *intel_crtc = 1990 to_intel_crtc(intel_dig_port->base.base.crtc); 1991 uint32_t DP = intel_dp->DP; 1992 1993 /* 1994 * DDI code has a strict mode set sequence and we should try to respect 1995 * it, otherwise we might hang the machine in many different ways. So we 1996 * really should be disabling the port only on a complete crtc_disable 1997 * sequence. This function is just called under two conditions on DDI 1998 * code: 1999 * - Link train failed while doing crtc_enable, and on this case we 2000 * really should respect the mode set sequence and wait for a 2001 * crtc_disable. 2002 * - Someone turned the monitor off and intel_dp_check_link_status 2003 * called us. We don't need to disable the whole port on this case, so 2004 * when someone turns the monitor on again, 2005 * intel_ddi_prepare_link_retrain will take care of redoing the link 2006 * train. 
2007 */ 2008 if (HAS_DDI(dev)) 2009 return; 2010 2011 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 2012 return; 2013 2014 DRM_DEBUG_KMS("\n"); 2015 2016 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 2017 DP &= ~DP_LINK_TRAIN_MASK_CPT; 2018 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 2019 } else { 2020 DP &= ~DP_LINK_TRAIN_MASK; 2021 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 2022 } 2023 POSTING_READ(intel_dp->output_reg); 2024 2025 /* We don't really know why we're doing this */ 2026 intel_wait_for_vblank(dev, intel_crtc->pipe); 2027 2028 if (HAS_PCH_IBX(dev) && 2029 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 2030 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 2031 2032 /* Hardware workaround: leaving our transcoder select 2033 * set to transcoder B while it's off will prevent the 2034 * corresponding HDMI output on transcoder A. 2035 * 2036 * Combine this with another hardware workaround: 2037 * transcoder select bit can only be cleared while the 2038 * port is enabled. 2039 */ 2040 DP &= ~DP_PIPEB_SELECT; 2041 I915_WRITE(intel_dp->output_reg, DP); 2042 2043 /* Changes to enable or select take place the vblank 2044 * after being written. 2045 */ 2046 if (WARN_ON(crtc == NULL)) { 2047 /* We should never try to disable a port without a crtc 2048 * attached. For paranoia keep the code around for a 2049 * bit. 
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/* Read the sink's DPCD receiver capabilities (and, when present, the
 * downstream port info) into intel_dp.  Returns false when the AUX read
 * fails or no DPCD is present. */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/* Read and log the sink/branch OUIs, purely for debugging.  Takes a
 * temporary VDD reference so AUX works on eDP. */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/* Fetch the sink's service IRQ vector over AUX; returns false on failure. */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

/* Respond to a sink automated-test request. */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ?
connector_status_connected 2214 : connector_status_disconnected; 2215 } 2216 2217 /* If no HPD, poke DDC gently */ 2218 if (drm_probe_ddc(&intel_dp->adapter)) 2219 return connector_status_connected; 2220 2221 /* Well we tried, say unknown for unreliable port types */ 2222 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2223 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2224 return connector_status_unknown; 2225 2226 /* Anything else is out of spec, warn and ignore */ 2227 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2228 return connector_status_disconnected; 2229} 2230 2231static enum drm_connector_status 2232ironlake_dp_detect(struct intel_dp *intel_dp) 2233{ 2234 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2235 struct drm_i915_private *dev_priv = dev->dev_private; 2236 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2237 enum drm_connector_status status; 2238 2239 /* Can't disconnect eDP, but you can close the lid... */ 2240 if (is_edp(intel_dp)) { 2241 status = intel_panel_detect(dev); 2242 if (status == connector_status_unknown) 2243 status = connector_status_connected; 2244 return status; 2245 } 2246 2247 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 2248 return connector_status_disconnected; 2249 2250 return intel_dp_detect_dpcd(intel_dp); 2251} 2252 2253static enum drm_connector_status 2254g4x_dp_detect(struct intel_dp *intel_dp) 2255{ 2256 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2257 struct drm_i915_private *dev_priv = dev->dev_private; 2258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2259 uint32_t bit; 2260 2261 /* Can't disconnect eDP, but you can close the lid... 
*/ 2262 if (is_edp(intel_dp)) { 2263 enum drm_connector_status status; 2264 2265 status = intel_panel_detect(dev); 2266 if (status == connector_status_unknown) 2267 status = connector_status_connected; 2268 return status; 2269 } 2270 2271 switch (intel_dig_port->port) { 2272 case PORT_B: 2273 bit = PORTB_HOTPLUG_LIVE_STATUS; 2274 break; 2275 case PORT_C: 2276 bit = PORTC_HOTPLUG_LIVE_STATUS; 2277 break; 2278 case PORT_D: 2279 bit = PORTD_HOTPLUG_LIVE_STATUS; 2280 break; 2281 default: 2282 return connector_status_unknown; 2283 } 2284 2285 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 2286 return connector_status_disconnected; 2287 2288 return intel_dp_detect_dpcd(intel_dp); 2289} 2290 2291static struct edid * 2292intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 2293{ 2294 struct intel_connector *intel_connector = to_intel_connector(connector); 2295 2296 /* use cached edid if we have one */ 2297 if (intel_connector->edid) { 2298 struct edid *edid; 2299 int size; 2300 2301 /* invalid edid */ 2302 if (IS_ERR(intel_connector->edid)) 2303 return NULL; 2304 2305 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 2306 edid = kmalloc(size, GFP_KERNEL); 2307 if (!edid) 2308 return NULL; 2309 2310 memcpy(edid, intel_connector->edid, size); 2311 return edid; 2312 } 2313 2314 return drm_get_edid(connector, adapter); 2315} 2316 2317static int 2318intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) 2319{ 2320 struct intel_connector *intel_connector = to_intel_connector(connector); 2321 2322 /* use cached edid if we have one */ 2323 if (intel_connector->edid) { 2324 /* invalid edid */ 2325 if (IS_ERR(intel_connector->edid)) 2326 return 0; 2327 2328 return intel_connector_update_modes(connector, 2329 intel_connector->edid); 2330 } 2331 2332 return intel_ddc_get_modes(connector, adapter); 2333} 2334 2335static enum drm_connector_status 2336intel_dp_detect(struct drm_connector *connector, bool force) 2337{ 
2338 struct intel_dp *intel_dp = intel_attached_dp(connector); 2339 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2340 struct intel_encoder *intel_encoder = &intel_dig_port->base; 2341 struct drm_device *dev = connector->dev; 2342 enum drm_connector_status status; 2343 struct edid *edid = NULL; 2344 2345 intel_dp->has_audio = false; 2346 2347 if (HAS_PCH_SPLIT(dev)) 2348 status = ironlake_dp_detect(intel_dp); 2349 else 2350 status = g4x_dp_detect(intel_dp); 2351 2352 if (status != connector_status_connected) 2353 return status; 2354 2355 intel_dp_probe_oui(intel_dp); 2356 2357 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 2358 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); 2359 } else { 2360 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2361 if (edid) { 2362 intel_dp->has_audio = drm_detect_monitor_audio(edid); 2363 kfree(edid); 2364 } 2365 } 2366 2367 if (intel_encoder->type != INTEL_OUTPUT_EDP) 2368 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2369 return connector_status_connected; 2370} 2371 2372static int intel_dp_get_modes(struct drm_connector *connector) 2373{ 2374 struct intel_dp *intel_dp = intel_attached_dp(connector); 2375 struct intel_connector *intel_connector = to_intel_connector(connector); 2376 struct drm_device *dev = connector->dev; 2377 int ret; 2378 2379 /* We should parse the EDID data and find out if it has an audio sink 2380 */ 2381 2382 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); 2383 if (ret) 2384 return ret; 2385 2386 /* if eDP has no EDID, fall back to fixed mode */ 2387 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2388 struct drm_display_mode *mode; 2389 mode = drm_mode_duplicate(dev, 2390 intel_connector->panel.fixed_mode); 2391 if (mode) { 2392 drm_mode_probed_add(connector, mode); 2393 return 1; 2394 } 2395 } 2396 return 0; 2397} 2398 2399static bool 2400intel_dp_detect_audio(struct drm_connector *connector) 2401{ 2402 struct intel_dp *intel_dp 
= intel_attached_dp(connector); 2403 struct edid *edid; 2404 bool has_audio = false; 2405 2406 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2407 if (edid) { 2408 has_audio = drm_detect_monitor_audio(edid); 2409 kfree(edid); 2410 } 2411 2412 return has_audio; 2413} 2414 2415static int 2416intel_dp_set_property(struct drm_connector *connector, 2417 struct drm_property *property, 2418 uint64_t val) 2419{ 2420 struct drm_i915_private *dev_priv = connector->dev->dev_private; 2421 struct intel_connector *intel_connector = to_intel_connector(connector); 2422 struct intel_encoder *intel_encoder = intel_attached_encoder(connector); 2423 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2424 int ret; 2425 2426 ret = drm_object_property_set_value(&connector->base, property, val); 2427 if (ret) 2428 return ret; 2429 2430 if (property == dev_priv->force_audio_property) { 2431 int i = val; 2432 bool has_audio; 2433 2434 if (i == intel_dp->force_audio) 2435 return 0; 2436 2437 intel_dp->force_audio = i; 2438 2439 if (i == HDMI_AUDIO_AUTO) 2440 has_audio = intel_dp_detect_audio(connector); 2441 else 2442 has_audio = (i == HDMI_AUDIO_ON); 2443 2444 if (has_audio == intel_dp->has_audio) 2445 return 0; 2446 2447 intel_dp->has_audio = has_audio; 2448 goto done; 2449 } 2450 2451 if (property == dev_priv->broadcast_rgb_property) { 2452 switch (val) { 2453 case INTEL_BROADCAST_RGB_AUTO: 2454 intel_dp->color_range_auto = true; 2455 break; 2456 case INTEL_BROADCAST_RGB_FULL: 2457 intel_dp->color_range_auto = false; 2458 intel_dp->color_range = 0; 2459 break; 2460 case INTEL_BROADCAST_RGB_LIMITED: 2461 intel_dp->color_range_auto = false; 2462 intel_dp->color_range = DP_COLOR_RANGE_16_235; 2463 break; 2464 default: 2465 return -EINVAL; 2466 } 2467 goto done; 2468 } 2469 2470 if (is_edp(intel_dp) && 2471 property == connector->dev->mode_config.scaling_mode_property) { 2472 if (val == DRM_MODE_SCALE_NONE) { 2473 DRM_DEBUG_KMS("no scaling not supported\n"); 2474 
return -EINVAL; 2475 } 2476 2477 if (intel_connector->panel.fitting_mode == val) { 2478 /* the eDP scaling property is not changed */ 2479 return 0; 2480 } 2481 intel_connector->panel.fitting_mode = val; 2482 2483 goto done; 2484 } 2485 2486 return -EINVAL; 2487 2488done: 2489 if (intel_encoder->base.crtc) 2490 intel_crtc_restore_mode(intel_encoder->base.crtc); 2491 2492 return 0; 2493} 2494 2495static void 2496intel_dp_destroy(struct drm_connector *connector) 2497{ 2498 struct drm_device *dev = connector->dev; 2499 struct intel_dp *intel_dp = intel_attached_dp(connector); 2500 struct intel_connector *intel_connector = to_intel_connector(connector); 2501 2502 if (!IS_ERR_OR_NULL(intel_connector->edid)) 2503 kfree(intel_connector->edid); 2504 2505 if (is_edp(intel_dp)) { 2506 intel_panel_destroy_backlight(dev); 2507 intel_panel_fini(&intel_connector->panel); 2508 } 2509 2510 drm_sysfs_connector_remove(connector); 2511 drm_connector_cleanup(connector); 2512 kfree(connector); 2513} 2514 2515void intel_dp_encoder_destroy(struct drm_encoder *encoder) 2516{ 2517 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 2518 struct intel_dp *intel_dp = &intel_dig_port->dp; 2519 2520 i2c_del_adapter(&intel_dp->adapter); 2521 drm_encoder_cleanup(encoder); 2522 if (is_edp(intel_dp)) { 2523 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2524 ironlake_panel_vdd_off_sync(intel_dp); 2525 } 2526 kfree(intel_dig_port); 2527} 2528 2529static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2530 .mode_fixup = intel_dp_mode_fixup, 2531 .mode_set = intel_dp_mode_set, 2532}; 2533 2534static const struct drm_connector_funcs intel_dp_connector_funcs = { 2535 .dpms = intel_connector_dpms, 2536 .detect = intel_dp_detect, 2537 .fill_modes = drm_helper_probe_single_connector_modes, 2538 .set_property = intel_dp_set_property, 2539 .destroy = intel_dp_destroy, 2540}; 2541 2542static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 
2543 .get_modes = intel_dp_get_modes, 2544 .mode_valid = intel_dp_mode_valid, 2545 .best_encoder = intel_best_encoder, 2546}; 2547 2548static const struct drm_encoder_funcs intel_dp_enc_funcs = { 2549 .destroy = intel_dp_encoder_destroy, 2550}; 2551 2552static void 2553intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2554{ 2555 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2556 2557 intel_dp_check_link_status(intel_dp); 2558} 2559 2560/* Return which DP Port should be selected for Transcoder DP control */ 2561int 2562intel_trans_dp_port_sel(struct drm_crtc *crtc) 2563{ 2564 struct drm_device *dev = crtc->dev; 2565 struct intel_encoder *intel_encoder; 2566 struct intel_dp *intel_dp; 2567 2568 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 2569 intel_dp = enc_to_intel_dp(&intel_encoder->base); 2570 2571 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2572 intel_encoder->type == INTEL_OUTPUT_EDP) 2573 return intel_dp->output_reg; 2574 } 2575 2576 return -1; 2577} 2578 2579/* check the VBT to see whether the eDP is on DP-D port */ 2580bool intel_dpd_is_edp(struct drm_device *dev) 2581{ 2582 struct drm_i915_private *dev_priv = dev->dev_private; 2583 struct child_device_config *p_child; 2584 int i; 2585 2586 if (!dev_priv->child_dev_num) 2587 return false; 2588 2589 for (i = 0; i < dev_priv->child_dev_num; i++) { 2590 p_child = dev_priv->child_dev + i; 2591 2592 if (p_child->dvo_port == PORT_IDPD && 2593 p_child->device_type == DEVICE_TYPE_eDP) 2594 return true; 2595 } 2596 return false; 2597} 2598 2599static void 2600intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 2601{ 2602 struct intel_connector *intel_connector = to_intel_connector(connector); 2603 2604 intel_attach_force_audio_property(connector); 2605 intel_attach_broadcast_rgb_property(connector); 2606 intel_dp->color_range_auto = true; 2607 2608 if (is_edp(intel_dp)) { 2609 drm_mode_create_scaling_mode_property(connector->dev); 2610 
drm_object_attach_property( 2611 &connector->base, 2612 connector->dev->mode_config.scaling_mode_property, 2613 DRM_MODE_SCALE_ASPECT); 2614 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 2615 } 2616} 2617 2618static void 2619intel_dp_init_panel_power_sequencer(struct drm_device *dev, 2620 struct intel_dp *intel_dp, 2621 struct edp_power_seq *out) 2622{ 2623 struct drm_i915_private *dev_priv = dev->dev_private; 2624 struct edp_power_seq cur, vbt, spec, final; 2625 u32 pp_on, pp_off, pp_div, pp; 2626 2627 /* Workaround: Need to write PP_CONTROL with the unlock key as 2628 * the very first thing. */ 2629 pp = ironlake_get_pp_control(dev_priv); 2630 I915_WRITE(PCH_PP_CONTROL, pp); 2631 2632 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2633 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2634 pp_div = I915_READ(PCH_PP_DIVISOR); 2635 2636 /* Pull timing values out of registers */ 2637 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 2638 PANEL_POWER_UP_DELAY_SHIFT; 2639 2640 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2641 PANEL_LIGHT_ON_DELAY_SHIFT; 2642 2643 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2644 PANEL_LIGHT_OFF_DELAY_SHIFT; 2645 2646 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 2647 PANEL_POWER_DOWN_DELAY_SHIFT; 2648 2649 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 2650 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 2651 2652 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2653 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 2654 2655 vbt = dev_priv->edp.pps; 2656 2657 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 2658 * our hw here, which are all in 100usec. */ 2659 spec.t1_t3 = 210 * 10; 2660 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 2661 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 2662 spec.t10 = 500 * 10; 2663 /* This one is special and actually in units of 100ms, but zero 2664 * based in the hw (so we need to add 100 ms). 
But the sw vbt 2665 * table multiplies it with 1000 to make it in units of 100usec, 2666 * too. */ 2667 spec.t11_t12 = (510 + 100) * 10; 2668 2669 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2670 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); 2671 2672 /* Use the max of the register settings and vbt. If both are 2673 * unset, fall back to the spec limits. */ 2674#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ 2675 spec.field : \ 2676 max(cur.field, vbt.field)) 2677 assign_final(t1_t3); 2678 assign_final(t8); 2679 assign_final(t9); 2680 assign_final(t10); 2681 assign_final(t11_t12); 2682#undef assign_final 2683 2684#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) 2685 intel_dp->panel_power_up_delay = get_delay(t1_t3); 2686 intel_dp->backlight_on_delay = get_delay(t8); 2687 intel_dp->backlight_off_delay = get_delay(t9); 2688 intel_dp->panel_power_down_delay = get_delay(t10); 2689 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2690#undef get_delay 2691 2692 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2693 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2694 intel_dp->panel_power_cycle_delay); 2695 2696 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2697 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2698 2699 if (out) 2700 *out = final; 2701} 2702 2703static void 2704intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 2705 struct intel_dp *intel_dp, 2706 struct edp_power_seq *seq) 2707{ 2708 struct drm_i915_private *dev_priv = dev->dev_private; 2709 u32 pp_on, pp_off, pp_div; 2710 2711 /* And finally store the new values in the power sequencer. 
*/ 2712 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2713 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2714 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 2715 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 2716 /* Compute the divisor for the pp clock, simply match the Bspec 2717 * formula. */ 2718 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) 2719 << PP_REFERENCE_DIVIDER_SHIFT; 2720 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) 2721 << PANEL_POWER_CYCLE_DELAY_SHIFT); 2722 2723 /* Haswell doesn't have any port selection bits for the panel 2724 * power sequencer any more. */ 2725 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 2726 if (is_cpu_edp(intel_dp)) 2727 pp_on |= PANEL_POWER_PORT_DP_A; 2728 else 2729 pp_on |= PANEL_POWER_PORT_DP_D; 2730 } 2731 2732 I915_WRITE(PCH_PP_ON_DELAYS, pp_on); 2733 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); 2734 I915_WRITE(PCH_PP_DIVISOR, pp_div); 2735 2736 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 2737 I915_READ(PCH_PP_ON_DELAYS), 2738 I915_READ(PCH_PP_OFF_DELAYS), 2739 I915_READ(PCH_PP_DIVISOR)); 2740} 2741 2742void 2743intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 2744 struct intel_connector *intel_connector) 2745{ 2746 struct drm_connector *connector = &intel_connector->base; 2747 struct intel_dp *intel_dp = &intel_dig_port->dp; 2748 struct intel_encoder *intel_encoder = &intel_dig_port->base; 2749 struct drm_device *dev = intel_encoder->base.dev; 2750 struct drm_i915_private *dev_priv = dev->dev_private; 2751 struct drm_display_mode *fixed_mode = NULL; 2752 struct edp_power_seq power_seq = { 0 }; 2753 enum port port = intel_dig_port->port; 2754 const char *name = NULL; 2755 int type; 2756 2757 /* Preserve the current hw state. 
*/ 2758 intel_dp->DP = I915_READ(intel_dp->output_reg); 2759 intel_dp->attached_connector = intel_connector; 2760 2761 if (HAS_PCH_SPLIT(dev) && port == PORT_D) 2762 if (intel_dpd_is_edp(dev)) 2763 intel_dp->is_pch_edp = true; 2764 2765 /* 2766 * FIXME : We need to initialize built-in panels before external panels. 2767 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup 2768 */ 2769 if (IS_VALLEYVIEW(dev) && port == PORT_C) { 2770 type = DRM_MODE_CONNECTOR_eDP; 2771 intel_encoder->type = INTEL_OUTPUT_EDP; 2772 } else if (port == PORT_A || is_pch_edp(intel_dp)) { 2773 type = DRM_MODE_CONNECTOR_eDP; 2774 intel_encoder->type = INTEL_OUTPUT_EDP; 2775 } else { 2776 /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for 2777 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't 2778 * rewrite it. 2779 */ 2780 type = DRM_MODE_CONNECTOR_DisplayPort; 2781 } 2782 2783 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2784 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2785 2786 connector->polled = DRM_CONNECTOR_POLL_HPD; 2787 connector->interlace_allowed = true; 2788 connector->doublescan_allowed = 0; 2789 2790 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 2791 ironlake_panel_vdd_work); 2792 2793 intel_connector_attach_encoder(intel_connector, intel_encoder); 2794 drm_sysfs_connector_add(connector); 2795 2796 if (HAS_DDI(dev)) 2797 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 2798 else 2799 intel_connector->get_hw_state = intel_connector_get_hw_state; 2800 2801 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 2802 if (HAS_DDI(dev)) { 2803 switch (intel_dig_port->port) { 2804 case PORT_A: 2805 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; 2806 break; 2807 case PORT_B: 2808 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; 2809 break; 2810 case PORT_C: 2811 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; 2812 break; 2813 case PORT_D: 2814 intel_dp->aux_ch_ctl_reg = 
PCH_DPD_AUX_CH_CTL; 2815 break; 2816 default: 2817 BUG(); 2818 } 2819 } 2820 2821 /* Set up the DDC bus. */ 2822 switch (port) { 2823 case PORT_A: 2824 name = "DPDDC-A"; 2825 break; 2826 case PORT_B: 2827 dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS; 2828 name = "DPDDC-B"; 2829 break; 2830 case PORT_C: 2831 dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS; 2832 name = "DPDDC-C"; 2833 break; 2834 case PORT_D: 2835 dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS; 2836 name = "DPDDC-D"; 2837 break; 2838 default: 2839 BUG(); 2840 } 2841 2842 if (is_edp(intel_dp)) 2843 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2844 2845 intel_dp_i2c_init(intel_dp, intel_connector, name); 2846 2847 /* Cache DPCD and EDID for edp. */ 2848 if (is_edp(intel_dp)) { 2849 bool ret; 2850 struct drm_display_mode *scan; 2851 struct edid *edid; 2852 2853 ironlake_edp_panel_vdd_on(intel_dp); 2854 ret = intel_dp_get_dpcd(intel_dp); 2855 ironlake_edp_panel_vdd_off(intel_dp, false); 2856 2857 if (ret) { 2858 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2859 dev_priv->no_aux_handshake = 2860 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2861 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2862 } else { 2863 /* if this fails, presume the device is a ghost */ 2864 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2865 intel_dp_encoder_destroy(&intel_encoder->base); 2866 intel_dp_destroy(connector); 2867 return; 2868 } 2869 2870 /* We now know it's not a ghost, init power sequence regs. 
*/ 2871 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2872 &power_seq); 2873 2874 ironlake_edp_panel_vdd_on(intel_dp); 2875 edid = drm_get_edid(connector, &intel_dp->adapter); 2876 if (edid) { 2877 if (drm_add_edid_modes(connector, edid)) { 2878 drm_mode_connector_update_edid_property(connector, edid); 2879 drm_edid_to_eld(connector, edid); 2880 } else { 2881 kfree(edid); 2882 edid = ERR_PTR(-EINVAL); 2883 } 2884 } else { 2885 edid = ERR_PTR(-ENOENT); 2886 } 2887 intel_connector->edid = edid; 2888 2889 /* prefer fixed mode from EDID if available */ 2890 list_for_each_entry(scan, &connector->probed_modes, head) { 2891 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 2892 fixed_mode = drm_mode_duplicate(dev, scan); 2893 break; 2894 } 2895 } 2896 2897 /* fallback to VBT if available for eDP */ 2898 if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { 2899 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 2900 if (fixed_mode) 2901 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 2902 } 2903 2904 ironlake_edp_panel_vdd_off(intel_dp, false); 2905 } 2906 2907 if (is_edp(intel_dp)) { 2908 intel_panel_init(&intel_connector->panel, fixed_mode); 2909 intel_panel_setup_backlight(connector); 2910 } 2911 2912 intel_dp_add_properties(intel_dp, connector); 2913 2914 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 2915 * 0xd. Failure to do so will result in spurious interrupts being 2916 * generated on the port when a cable is not attached. 
2917 */ 2918 if (IS_G4X(dev) && !IS_GM45(dev)) { 2919 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 2920 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2921 } 2922} 2923 2924void 2925intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 2926{ 2927 struct intel_digital_port *intel_dig_port; 2928 struct intel_encoder *intel_encoder; 2929 struct drm_encoder *encoder; 2930 struct intel_connector *intel_connector; 2931 2932 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 2933 if (!intel_dig_port) 2934 return; 2935 2936 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 2937 if (!intel_connector) { 2938 kfree(intel_dig_port); 2939 return; 2940 } 2941 2942 intel_encoder = &intel_dig_port->base; 2943 encoder = &intel_encoder->base; 2944 2945 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 2946 DRM_MODE_ENCODER_TMDS); 2947 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); 2948 2949 intel_encoder->enable = intel_enable_dp; 2950 intel_encoder->pre_enable = intel_pre_enable_dp; 2951 intel_encoder->disable = intel_disable_dp; 2952 intel_encoder->post_disable = intel_post_disable_dp; 2953 intel_encoder->get_hw_state = intel_dp_get_hw_state; 2954 2955 intel_dig_port->port = port; 2956 intel_dig_port->dp.output_reg = output_reg; 2957 2958 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2959 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2960 intel_encoder->cloneable = false; 2961 intel_encoder->hot_plug = intel_dp_hot_plug; 2962 2963 intel_dp_init_connector(intel_dig_port, intel_connector); 2964} 2965