intel_dp.c revision 750eb99e0ec12f9a13446284d493d35a60866624
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Number of DPCD receiver-capability bytes cached in intel_dp->dpcd */
#define DP_RECEIVER_CAP_SIZE	0xf
/* Number of link-status bytes read back during/after link training */
#define DP_LINK_STATUS_SIZE	6
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	return intel_dp->base.type == INTEL_OUTPUT_EDP;
}

/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	return intel_dp->is_pch_edp;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}

/* Map a connector back to the intel_dp that owns its attached encoder. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return container_of(intel_attached_encoder(connector),
			    struct intel_dp, base);
}

/**
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 * @encoder: DRM encoder
 *
 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
 * by intel_display.c.
 */
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (!encoder)
		return false;

	intel_dp = enc_to_intel_dp(encoder);

	return is_pch_edp(intel_dp);
}

static void intel_dp_start_link_train(struct intel_dp *intel_dp);
static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);

/*
 * Report the currently-programmed lane count and link bandwidth (in kHz)
 * for an eDP encoder.  *link_bw is only written for the two known link
 * rates; callers are expected to have a trained link.
 */
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
		      int *lane_num, int *link_bw)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	*lane_num = intel_dp->lane_count;
	if (intel_dp->link_bw == DP_LINK_BW_1_62)
		*link_bw = 162000;
	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
		*link_bw = 270000;
}

/*
 * Return the pixel clock (kHz) that will actually be driven: the fixed
 * panel mode's clock for eDP panels with a fixed mode, else the requested
 * mode's clock.
 */
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
		       struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	if (intel_dp->panel_fixed_mode)
		return intel_dp->panel_fixed_mode->clock;
	else
		return mode->clock;
}

/* Sink's max lane count from DPCD, sanitized to 1/2/4 (default 4). */
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
	switch (max_lane_count) {
	case 1: case 2: case 4:
		break;
	default:
		max_lane_count = 4;
	}
	return max_lane_count;
}

/* Sink's max link rate from DPCD, sanitized to 1.62G/2.7G (default 1.62G). */
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

/* Convert a DPCD link-rate code into a clock in kHz. */
static int
intel_dp_link_clock(uint8_t link_bw)
{
	if (link_bw == DP_LINK_BW_2_7)
		return 270000;
	else
		return 162000;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* +9 rounds the division up so we never under-estimate the need */
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* 8b/10b encoding: only 8 of every 10 link bits carry data */
	return (max_link_clock * max_lanes * 8) / 10;
}

/*
 * Check whether @mode fits on this link at 24bpp; if not, try 18bpp and,
 * when @adjust_mode is set, flag the mode for forced 6bpc dithering.
 * Returns false only if the mode doesn't fit even at 18bpp.
 */
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)
{
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_lanes = intel_dp_max_lane_count(intel_dp);
	int max_rate, mode_rate;

	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;

		if (adjust_mode)
			mode->private_flags
				|= INTEL_MODE_DP_FORCE_6BPC;

		return true;
	}

	return true;
}

/* drm_connector_helper_funcs.mode_valid: reject modes the panel/link can't do. */
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	/* eDP panels with a fixed mode can't scale up beyond the panel size */
	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/* Pack up to 4 bytes, MSB first, into one 32-bit AUX channel data word. */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

/* Inverse of pack_aux: spread one 32-bit AUX data word into up to 4 bytes. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

/* True when the PCH power sequencer reports the panel as powered on. */
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}

/* True when panel VDD is being forced on via PCH_PP_CONTROL. */
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}

/*
 * Sanity check before AUX traffic: an eDP panel must have either panel
 * power or forced VDD, otherwise the AUX transaction can't work.  Warns
 * loudly but does not abort the caller.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

/*
 * Perform one raw AUX channel transaction: write @send_bytes of @send,
 * then read back up to @recv_size bytes into @recv.  Returns the number
 * of bytes received, or a negative errno (-EBUSY, -EIO, -ETIMEDOUT).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* By default the AUX registers live at fixed offsets from the port reg */
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	/* Haswell moved the AUX registers; pick them by port instead */
	if (IS_HASWELL(dev)) {
		switch (intel_dp->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];	/* 4-byte header + at most 16 data bytes */
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	/* Retry indefinitely while the sink replies DEFER */
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];	/* 1 ack byte + up to 19 data bytes */
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	/* Retry indefinitely while the sink replies DEFER */
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

/*
 * i2c-over-AUX transfer callback used by the i2c_algo_dp_aux adapter:
 * transfers a single byte per call (MODE_I2C_READ/WRITE), retrying up
 * to 5 times on DEFER replies.  Returns bytes transferred or -EREMOTEIO.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* address-only (start/stop) transaction */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);

/*
 * Register the i2c-over-AUX adapter for this port.  VDD is forced on
 * around bus registration because adding the bus probes the channel.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

/*
 * drm_encoder_helper_funcs.mode_fixup: pick the minimal link_bw/lane_count
 * combination that carries the mode, store it in the intel_dp, and set the
 * adjusted mode's clock to the link clock.  Returns false if no combination
 * fits or the mode is otherwise unusable.
 */
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int lane_count, clock;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Walk link rates (slow first), then lane counts 1/2/4 */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}

/* GMCH/link M/N ratio pairs programmed into the data/link M/N registers */
struct intel_dp_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

/* Shift both terms down until each fits in the 24-bit register field. */
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

/* Compute the data (GMCH) and link M/N ratios for the given configuration. */
static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}

/*
 * Program the pipe/transcoder data and link M/N registers for the DP
 * encoder on @crtc.  Register banks differ per platform (PCH split vs
 * Valleyview vs GMCH).
 */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

/*
 * drm_encoder_helper_funcs.mode_set: build the DP port register value
 * (intel_dp->DP) and DPCD link configuration for the chosen mode.  The
 * value is written to hardware later, during enable.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

/* Mask/value pairs for the panel power sequencer state waits below */
#define IDLE_ON_MASK		(PP_ON | 0	  | PP_SEQUENCE_MASK | 0		     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0	  | PP_SEQUENCE_NONE | 0		     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0	  | PP_SEQUENCE_MASK | 0		     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0	  | PP_SEQUENCE_NONE | 0		     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0	  | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0	  | PP_SEQUENCE_NONE | 0		     | PP_SEQUENCE_STATE_OFF_IDLE)

/* Poll PCH_PP_STATUS (up to 5s) until (status & mask) == value. */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Force panel VDD on so AUX transactions work before full panel power-up.
 * Pairs with ironlake_edp_panel_vdd_off(); tracked via want_panel_vdd.
 */
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/* Drop forced VDD now, but only if nobody has re-requested it meanwhile. */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Delayed-work handler for the deferred VDD-off path. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp->base.base.dev;

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Release our VDD request.  With @sync the VDD is dropped immediately;
 * otherwise it is dropped later from a delayed work so that a sequence of
 * AUX operations doesn't power-cycle VDD repeatedly.
 */
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/*
 * Turn the eDP panel power on via the PCH power sequencer and wait for
 * it to come up.  No-op for non-eDP ports or if power is already on.
 */
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

/*
 * Turn the eDP panel power off.  Callers must hold a VDD reference
 * (see WARN below); VDD is force-dropped together with panel power.
 * Waits for the power-off sequence to complete before returning.
 */
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

/* Enable the eDP backlight after the panel has had time to sync. */
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}

/* Disable the eDP backlight, then honor the panel's off delay. */
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

/*
 * Enable the CPU eDP PLL (DP_A).  The attached pipe must be disabled;
 * the 200us settle delay matches the hardware spec for PLL lock.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

/*
 * Disable the CPU eDP PLL.  Must only be called with the pipe disabled
 * and the port already off.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, the sink is allowed up to 1ms to wake,
		 * so retry the write (up to 3 attempts, 1ms apart).
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

/*
 * Read back the hardware state of the port.  Returns true if the port
 * is enabled and, when the pipe can be determined, stores it in @pipe.
 * Note: may return true without setting *pipe (see debug message at
 * the bottom) if no transcoder claims the port.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT PCH ports: the pipe binding lives in the transcoder's
		 * TRANS_DP_CTL port-select field, so scan all pipes. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}
	}

	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);

	return true;
}

/*
 * Disable path: backlight off, sink to D3... (see ordering comments
 * below; VDD must be held while panel power is switched off).
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled.
	 */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}

/*
 * Post-disable hook: for CPU eDP the link (and then the PLL) may only
 * be torn down after the pipe/plane have been shut off.
 */
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
	}
}

/*
 * Enable path: wake the sink, train the link (VDD held across
 * training), then panel power and finally backlight.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must still be off at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}

/* Pre-enable hook: the CPU eDP PLL must be running before the port. */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 *
 * Returns true only if a full @recv_bytes read succeeded.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

/* Index into a cached link-status buffer by DPCD register address @r. */
static uint8_t
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
		     int r)
{
	return link_status[r - DP_LANE0_1_STATUS];
}

/*
 * Extract the requested voltage swing for @lane from the two
 * ADJUST_REQUEST bytes (two lanes packed per byte, low nibble first).
 */
static uint8_t
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
				 int lane)
{
	int s = ((lane & 1) ?
		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];

	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}

/* Same as above but for the requested pre-emphasis level. */
static uint8_t
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
				      int lane)
{
	int s = ((lane & 1) ?
		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];

	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}


#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

/* Maximum voltage swing this source can drive for the given port. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

/*
 * Maximum pre-emphasis usable at @voltage_swing: higher swings allow
 * less pre-emphasis (source total output budget).
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_HASWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

/*
 * Compute the next train_set from the sink's ADJUST_REQUEST bytes:
 * take the highest voltage/pre-emphasis requested across lanes, clamp
 * to source limits (tagging MAX_*_REACHED), and apply to all 4 lanes.
 */
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/* Translate a DPCD train_set byte into pre-CPT DP port register bits. */
static uint32_t
intel_dp_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set &
		DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}

/* Extract the 4-bit per-lane status nibble for @lane (two per byte). */
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
		      int lane)
{
	int s = (lane & 1) * 4;
	uint8_t l = link_status[lane>>1];

	return (l >> s) & 0xf;
}

/* Check for clock recovery is done on all channels */
static bool
intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
	int lane;
	uint8_t lane_status;

	for (lane = 0; lane < lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & DP_LANE_CR_DONE) == 0)
			return false;
	}
	return true;
}

/* Check to see if channel eq is done on all channels */
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
			 DP_LANE_CHANNEL_EQ_DONE|\
			 DP_LANE_SYMBOL_LOCKED)
static bool
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t lane_align;
	uint8_t lane_status;
	int lane;

	/* Inter-lane alignment must be done in addition to per-lane EQ. */
	lane_align = intel_dp_link_status(link_status,
					  DP_LANE_ALIGN_STATUS_UPDATED);
	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
		return false;
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
			return false;
	}
	return true;
}

/*
 * Program the given training pattern into both the source port
 * registers and the sink's TRAINING_PATTERN_SET DPCD register, and
 * (unless disabling) push the current train_set to the sink.
 * Returns false if the train_set AUX write did not complete.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	uint32_t temp;

	if (IS_HASWELL(dev)) {
		/* HSW uses the DDI DP_TP_CTL register, not the port reg. */
		temp = I915_READ(DP_TP_CTL(intel_dp->port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			/* Send idle patterns and wait for them to complete
			 * before switching to normal pixel output. */
			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
			I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

			if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) &
				      DP_TP_STATUS_IDLE_DONE), 1))
				DRM_ERROR("Timed out waiting for DP idle patterns\n");

			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Fall back to pattern 2: TPS3 needs HBR2 hardware. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Tell the sink which pattern to expect. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];
		uint32_t signal_levels;

		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(
							intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		}
else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1881 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1882 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1883 } else { 1884 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1885 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1886 } 1887 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", 1888 signal_levels); 1889 1890 if (!intel_dp_set_link_train(intel_dp, DP, 1891 DP_TRAINING_PATTERN_1 | 1892 DP_LINK_SCRAMBLING_DISABLE)) 1893 break; 1894 /* Set training pattern 1 */ 1895 1896 udelay(100); 1897 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1898 DRM_ERROR("failed to get link status\n"); 1899 break; 1900 } 1901 1902 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1903 DRM_DEBUG_KMS("clock recovery OK\n"); 1904 clock_recovery = true; 1905 break; 1906 } 1907 1908 /* Check to see if we've tried the max voltage */ 1909 for (i = 0; i < intel_dp->lane_count; i++) 1910 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1911 break; 1912 if (i == intel_dp->lane_count && voltage_tries == 5) { 1913 ++loop_tries; 1914 if (loop_tries == 5) { 1915 DRM_DEBUG_KMS("too many full retries, give up\n"); 1916 break; 1917 } 1918 memset(intel_dp->train_set, 0, 4); 1919 voltage_tries = 0; 1920 continue; 1921 } 1922 1923 /* Check to see if we've tried the same voltage 5 times */ 1924 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1925 ++voltage_tries; 1926 if (voltage_tries == 5) { 1927 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 1928 break; 1929 } 1930 } else 1931 voltage_tries = 0; 1932 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1933 1934 /* Compute new intel_dp->train_set as requested by target */ 1935 intel_get_adjust_train(intel_dp, link_status); 1936 } 1937 1938 intel_dp->DP = DP; 1939} 1940 1941static void 1942intel_dp_complete_link_train(struct intel_dp 
*intel_dp) 1943{ 1944 struct drm_device *dev = intel_dp->base.base.dev; 1945 bool channel_eq = false; 1946 int tries, cr_tries; 1947 uint32_t DP = intel_dp->DP; 1948 1949 /* channel equalization */ 1950 tries = 0; 1951 cr_tries = 0; 1952 channel_eq = false; 1953 for (;;) { 1954 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1955 uint32_t signal_levels; 1956 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1957 1958 if (cr_tries > 5) { 1959 DRM_ERROR("failed to train DP, aborting\n"); 1960 intel_dp_link_down(intel_dp); 1961 break; 1962 } 1963 1964 if (IS_HASWELL(dev)) { 1965 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); 1966 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1967 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1968 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1969 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1970 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1971 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1972 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1973 } else { 1974 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1975 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1976 } 1977 1978 /* channel eq pattern */ 1979 if (!intel_dp_set_link_train(intel_dp, DP, 1980 DP_TRAINING_PATTERN_2 | 1981 DP_LINK_SCRAMBLING_DISABLE)) 1982 break; 1983 1984 udelay(400); 1985 if (!intel_dp_get_link_status(intel_dp, link_status)) 1986 break; 1987 1988 /* Make sure clock is still ok */ 1989 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1990 intel_dp_start_link_train(intel_dp); 1991 cr_tries++; 1992 continue; 1993 } 1994 1995 if (intel_channel_eq_ok(intel_dp, link_status)) { 1996 channel_eq = true; 1997 break; 1998 } 1999 2000 /* Try 5 times, then try clock recovery if that fails */ 2001 if (tries > 5) { 2002 intel_dp_link_down(intel_dp); 2003 
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	/* NOTE(review): "successfull" typo in the debug string below is
	 * preserved here; fixing it changes a runtime-emitted message. */
	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");

	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}

/*
 * Take the DP link down: put the port into the idle training pattern,
 * apply the IBX transcoder-B deselect workaround, then disable the port
 * and wait out the panel power-down delay.  No-op (with a WARN) if the
 * port is already disabled.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* Nothing to do if the port is already off. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Switch to the idle training pattern; CPT PCH uses different
	 * link-train bits than the CPU/older ports. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* NOTE(review): presumably gives the link time to settle in the
	 * idle pattern -- the 17ms figure is not explained here; confirm
	 * against hardware documentation. */
	msleep(17);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	/* Finally disable audio and the port itself. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/*
 * Read the sink's DPCD receiver capability block into intel_dp->dpcd and,
 * for DPCD >= 1.1 branch devices, the per-port downstream info into
 * intel_dp->downstream_ports.
 *
 * Returns false if the aux transfer fails or no DPCD is present,
 * true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/*
 * Debug helper: log the sink and branch IEEE OUIs over aux, if the sink
 * advertises OUI support.  Holds panel VDD around the aux reads.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/*
 * Read the DP_DEVICE_SERVICE_IRQ_VECTOR register from the sink into
 * *sink_irq_vector.  Returns false if the aux read fails.
 */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

/*
 * Respond to a sink automated-test request.  We implement no tests, so
 * just write a response to DP_TEST_RESPONSE.
 *
 * NOTE(review): the comment below says "NAK" but the value written is
 * DP_TEST_ACK -- verify which response is actually intended here.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

/* Re-check link health after a hot-plug interrupt: take the link down if
 * the sink stopped responding, ack any sink IRQs, and retrain when
 * channel EQ is no longer ok.  Only acts on active, CRTC-attached
 * encoders. */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->base.connectors_active)
		return;

	if (WARN_ON(!intel_dp->base.base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!intel_channel_eq_ok(intel_dp, link_status)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}

/* Determine connection status purely from DPCD (and DDC as a fallback):
 * used by both the PCH and g4x detect paths once the hardware says a
 * cable may be present. */
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* Detect path for PCH-split platforms: eDP panels are always considered
 * connected (modulo lid state), everything else goes through DPCD. */
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(intel_dp->base.base.dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	return intel_dp_detect_dpcd(intel_dp);
}

/* Detect path for g4x-class hardware: consult the port's live-status bit
 * in PORT_HOTPLUG_STAT before probing DPCD. */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit;

	switch (intel_dp->output_reg) {
	case DP_B:
		bit = DPB_HOTPLUG_LIVE_STATUS;
		break;
	case DP_C:
		bit = DPC_HOTPLUG_LIVE_STATUS;
		break;
	case DP_D:
		bit = DPD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/*
 * Return the EDID for this connector.  For eDP, a kmalloc'd copy of the
 * EDID cached at init time is returned (caller frees); otherwise the
 * EDID is freshly fetched over the given DDC adapter.
 */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	int size;

	if (is_edp(intel_dp)) {
		if (!intel_dp->edid)
			return NULL;

		/* EDID is base block plus extensions, EDID_LENGTH each. */
		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
		edid = kmalloc(size, GFP_KERNEL);
		if (!edid)
			return NULL;

		memcpy(edid, intel_dp->edid, size);
		return edid;
	}

	edid = drm_get_edid(connector, adapter);
	return edid;
}

/*
 * Populate the connector's mode list from EDID.  For eDP the cached EDID
 * is used and the mode count recorded at init time is returned; otherwise
 * modes are probed over DDC.
 */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	if (is_edp(intel_dp)) {
		drm_mode_connector_update_edid_property(connector,
							intel_dp->edid);
		/* NOTE(review): ret from drm_add_edid_modes is discarded
		 * here; the count cached at init is returned instead. */
		ret = drm_add_edid_modes(connector, intel_dp->edid);
		drm_edid_to_eld(connector,
				intel_dp->edid);
		return intel_dp->edid_mode_count;
	}

	ret = intel_ddc_get_modes(connector, adapter);
	return ret;
}


/**
 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
 *
 * \return true if DP port is connected.
 * \return false if DP port is disconnected.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	/* Platform-specific presence check, then common DPCD probing. */
	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Audio: honour the force-audio property, else ask the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	return connector_status_connected;
}

/* .get_modes connector hook: EDID modes first; for eDP fall back to the
 * panel fixed mode (from EDID's preferred mode or the VBT). */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret) {
		/* Remember the eDP panel's preferred mode as the fixed
		 * mode if we don't have one yet. */
		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
			struct drm_display_mode *newmode;
			list_for_each_entry(newmode, &connector->probed_modes,
					    head) {
				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
					intel_dp->panel_fixed_mode =
						drm_mode_duplicate(dev, newmode);
					break;
				}
			}
		}
		return ret;
	}

	/* if eDP has no EDID, try to use fixed panel mode from VBT */
	if (is_edp(intel_dp)) {
		/* initialize panel mode from VBT if available for eDP */
		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
			intel_dp->panel_fixed_mode =
				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (intel_dp->panel_fixed_mode) {
				intel_dp->panel_fixed_mode->type |=
					DRM_MODE_TYPE_PREFERRED;
			}
		}
		if (intel_dp->panel_fixed_mode) {
			struct drm_display_mode *mode;
			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

/* Fetch the EDID and report whether the attached monitor claims audio
 * support. */
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

/* .set_property connector hook: handles the force-audio and broadcast-RGB
 * properties, triggering a modeset when the effective state changes. */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_dp->color_range)
			return 0;

		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	return -EINVAL;

done:
	/* Re-do the modeset so the new audio/range state takes effect. */
	if (intel_dp->base.base.crtc) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
		intel_set_mode(crtc, &crtc->mode,
			       crtc->x, crtc->y, crtc->fb);
	}

	return 0;
}

/* Connector teardown: tear down backlight for a DP-D eDP panel, then
 * unregister and free the connector. */
static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	if (intel_dpd_is_edp(dev))
		intel_panel_destroy_backlight(dev);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

/* Encoder teardown: remove the DDC adapter, cancel pending VDD work and
 * sync VDD off for eDP, then free everything. */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		kfree(intel_dp->edid);
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		ironlake_panel_vdd_off_sync(intel_dp);
	}
	kfree(intel_dp);
}

/* Encoder helper vtable. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
	.disable = intel_encoder_noop,
};

/* Connector vtable. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

/* Connector helper vtable. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* Encoder vtable. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

/* Hot-plug callback: re-validate the link on HPD. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	/* No DP encoder on this crtc. */
	return -1;
}

/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	if (!dev_priv->child_dev_num)
		return false;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		p_child = dev_priv->child_dev + i;

		if (p_child->dvo_port == PORT_IDPD &&
		    p_child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}

/* Attach the standard DP connector properties (force-audio,
 * broadcast-RGB). */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
}

/*
 * Register a DP (or eDP) output: allocates the encoder and connector,
 * classifies the port as eDP vs. external DP, sets up the DDC bus and
 * encoder hooks, reads/caches panel power-sequencing delays, DPCD and
 * EDID for eDP, and applies the g4x band-gap workaround.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_dp *intel_dp;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	const char *name = NULL;
	int type;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
	if (!intel_dp)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->port = port;
	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}
	intel_encoder = &intel_dp->base;

	/* DP-D on a PCH-split system may actually be the eDP panel,
	 * according to the VBT. */
	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && output_reg == DP_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (output_reg == DP_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	}

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_encoder->cloneable = false;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	intel_encoder->enable = intel_enable_dp;
	intel_encoder->pre_enable = intel_pre_enable_dp;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->post_disable = intel_post_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		/* NOTE(review): name stays NULL here and is later passed
		 * to intel_dp_i2c_init -- confirm that path tolerates a
		 * NULL bus name. */
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	/* Cache some DPCD data in the eDP case */
	if (is_edp(intel_dp)) {
		struct edp_power_seq cur, vbt;
		u32 pp_on, pp_off, pp_div;

		/* Read the BIOS-programmed panel power sequencer state. */
		pp_on = I915_READ(PCH_PP_ON_DELAYS);
		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
		pp_div = I915_READ(PCH_PP_DIVISOR);

		if (!pp_on || !pp_off || !pp_div) {
			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Pull timing values out of registers */
		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
			PANEL_POWER_UP_DELAY_SHIFT;

		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
			PANEL_LIGHT_ON_DELAY_SHIFT;

		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
			PANEL_LIGHT_OFF_DELAY_SHIFT;

		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
			PANEL_POWER_DOWN_DELAY_SHIFT;

		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

		vbt = dev_priv->edp.pps;

		DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

		/* Use the max of hw and VBT delay, converted from
		 * 100us units to ms, rounded up. */
#define get_delay(field)	((max(cur.field, vbt.field) + 9) / 10)

		intel_dp->panel_power_up_delay = get_delay(t1_t3);
		intel_dp->backlight_on_delay = get_delay(t8);
		intel_dp->backlight_off_delay = get_delay(t9);
		intel_dp->panel_power_down_delay = get_delay(t10);
		intel_dp->panel_power_cycle_delay = get_delay(t11_t12);

		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
			      intel_dp->panel_power_cycle_delay);

		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
	}

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	if (is_edp(intel_dp)) {
		bool ret;
		struct edid *edid;

		/* DPCD reads require panel VDD. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Cache the panel EDID once, so detect/get_modes never
		 * need to hit the DDC bus again. */
		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			intel_dp->edid_mode_count =
				drm_add_edid_modes(connector, edid);
			drm_edid_to_eld(connector, edid);
			intel_dp->edid = edid;
		}
		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (is_edp(intel_dp)) {
		dev_priv->int_edp_connector = connector;
		intel_panel_setup_backlight(dev);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}