intel_dp.c revision 232351777cd0fe2341f917d28bf130df2b44bf8a
1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/i2c.h> 29#include <linux/slab.h> 30#include <linux/export.h> 31#include "drmP.h" 32#include "drm.h" 33#include "drm_crtc.h" 34#include "drm_crtc_helper.h" 35#include "drm_edid.h" 36#include "intel_drv.h" 37#include "i915_drm.h" 38#include "i915_drv.h" 39 40#define DP_RECEIVER_CAP_SIZE 0xf 41#define DP_LINK_STATUS_SIZE 6 42#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 43 44/** 45 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 46 * @intel_dp: DP struct 47 * 48 * If a CPU or PCH DP output is attached to an eDP panel, this function 49 * will return true, and false otherwise. 
50 */ 51static bool is_edp(struct intel_dp *intel_dp) 52{ 53 return intel_dp->base.type == INTEL_OUTPUT_EDP; 54} 55 56/** 57 * is_pch_edp - is the port on the PCH and attached to an eDP panel? 58 * @intel_dp: DP struct 59 * 60 * Returns true if the given DP struct corresponds to a PCH DP port attached 61 * to an eDP panel, false otherwise. Helpful for determining whether we 62 * may need FDI resources for a given DP output or not. 63 */ 64static bool is_pch_edp(struct intel_dp *intel_dp) 65{ 66 return intel_dp->is_pch_edp; 67} 68 69/** 70 * is_cpu_edp - is the port on the CPU and attached to an eDP panel? 71 * @intel_dp: DP struct 72 * 73 * Returns true if the given DP struct corresponds to a CPU eDP port. 74 */ 75static bool is_cpu_edp(struct intel_dp *intel_dp) 76{ 77 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 78} 79 80static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) 81{ 82 return container_of(encoder, struct intel_dp, base.base); 83} 84 85static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 86{ 87 return container_of(intel_attached_encoder(connector), 88 struct intel_dp, base); 89} 90 91/** 92 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? 93 * @encoder: DRM encoder 94 * 95 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed 96 * by intel_display.c. 
97 */ 98bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) 99{ 100 struct intel_dp *intel_dp; 101 102 if (!encoder) 103 return false; 104 105 intel_dp = enc_to_intel_dp(encoder); 106 107 return is_pch_edp(intel_dp); 108} 109 110static void intel_dp_start_link_train(struct intel_dp *intel_dp); 111static void intel_dp_complete_link_train(struct intel_dp *intel_dp); 112static void intel_dp_link_down(struct intel_dp *intel_dp); 113 114void 115intel_edp_link_config(struct intel_encoder *intel_encoder, 116 int *lane_num, int *link_bw) 117{ 118 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 119 120 *lane_num = intel_dp->lane_count; 121 if (intel_dp->link_bw == DP_LINK_BW_1_62) 122 *link_bw = 162000; 123 else if (intel_dp->link_bw == DP_LINK_BW_2_7) 124 *link_bw = 270000; 125} 126 127int 128intel_edp_target_clock(struct intel_encoder *intel_encoder, 129 struct drm_display_mode *mode) 130{ 131 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 132 133 if (intel_dp->panel_fixed_mode) 134 return intel_dp->panel_fixed_mode->clock; 135 else 136 return mode->clock; 137} 138 139static int 140intel_dp_max_lane_count(struct intel_dp *intel_dp) 141{ 142 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 143 switch (max_lane_count) { 144 case 1: case 2: case 4: 145 break; 146 default: 147 max_lane_count = 4; 148 } 149 return max_lane_count; 150} 151 152static int 153intel_dp_max_link_bw(struct intel_dp *intel_dp) 154{ 155 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 156 157 switch (max_link_bw) { 158 case DP_LINK_BW_1_62: 159 case DP_LINK_BW_2_7: 160 break; 161 default: 162 max_link_bw = DP_LINK_BW_1_62; 163 break; 164 } 165 return max_link_bw; 166} 167 168static int 169intel_dp_link_clock(uint8_t link_bw) 170{ 171 if (link_bw == DP_LINK_BW_2_7) 172 return 270000; 173 else 174 return 162000; 175} 176 177/* 178 * The units on the numbers in the next two are... bizarre. 
Examples will 179 * make it clearer; this one parallels an example in the eDP spec. 180 * 181 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 182 * 183 * 270000 * 1 * 8 / 10 == 216000 184 * 185 * The actual data capacity of that configuration is 2.16Gbit/s, so the 186 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 187 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 188 * 119000. At 18bpp that's 2142000 kilobits per second. 189 * 190 * Thus the strange-looking division by 10 in intel_dp_link_required, to 191 * get the result in decakilobits instead of kilobits. 192 */ 193 194static int 195intel_dp_link_required(int pixel_clock, int bpp) 196{ 197 return (pixel_clock * bpp + 9) / 10; 198} 199 200static int 201intel_dp_max_data_rate(int max_link_clock, int max_lanes) 202{ 203 return (max_link_clock * max_lanes * 8) / 10; 204} 205 206static bool 207intel_dp_adjust_dithering(struct intel_dp *intel_dp, 208 struct drm_display_mode *mode, 209 bool adjust_mode) 210{ 211 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 212 int max_lanes = intel_dp_max_lane_count(intel_dp); 213 int max_rate, mode_rate; 214 215 mode_rate = intel_dp_link_required(mode->clock, 24); 216 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 217 218 if (mode_rate > max_rate) { 219 mode_rate = intel_dp_link_required(mode->clock, 18); 220 if (mode_rate > max_rate) 221 return false; 222 223 if (adjust_mode) 224 mode->private_flags 225 |= INTEL_MODE_DP_FORCE_6BPC; 226 227 return true; 228 } 229 230 return true; 231} 232 233static int 234intel_dp_mode_valid(struct drm_connector *connector, 235 struct drm_display_mode *mode) 236{ 237 struct intel_dp *intel_dp = intel_attached_dp(connector); 238 239 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 240 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) 241 return MODE_PANEL; 242 243 if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) 244 return 
MODE_PANEL; 245 } 246 247 if (!intel_dp_adjust_dithering(intel_dp, mode, false)) 248 return MODE_CLOCK_HIGH; 249 250 if (mode->clock < 10000) 251 return MODE_CLOCK_LOW; 252 253 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 254 return MODE_H_ILLEGAL; 255 256 return MODE_OK; 257} 258 259static uint32_t 260pack_aux(uint8_t *src, int src_bytes) 261{ 262 int i; 263 uint32_t v = 0; 264 265 if (src_bytes > 4) 266 src_bytes = 4; 267 for (i = 0; i < src_bytes; i++) 268 v |= ((uint32_t) src[i]) << ((3-i) * 8); 269 return v; 270} 271 272static void 273unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 274{ 275 int i; 276 if (dst_bytes > 4) 277 dst_bytes = 4; 278 for (i = 0; i < dst_bytes; i++) 279 dst[i] = src >> ((3-i) * 8); 280} 281 282/* hrawclock is 1/4 the FSB frequency */ 283static int 284intel_hrawclk(struct drm_device *dev) 285{ 286 struct drm_i915_private *dev_priv = dev->dev_private; 287 uint32_t clkcfg; 288 289 clkcfg = I915_READ(CLKCFG); 290 switch (clkcfg & CLKCFG_FSB_MASK) { 291 case CLKCFG_FSB_400: 292 return 100; 293 case CLKCFG_FSB_533: 294 return 133; 295 case CLKCFG_FSB_667: 296 return 166; 297 case CLKCFG_FSB_800: 298 return 200; 299 case CLKCFG_FSB_1067: 300 return 266; 301 case CLKCFG_FSB_1333: 302 return 333; 303 /* these two are just a guess; one of them might be right */ 304 case CLKCFG_FSB_1600: 305 case CLKCFG_FSB_1600_ALT: 306 return 400; 307 default: 308 return 133; 309 } 310} 311 312static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 313{ 314 struct drm_device *dev = intel_dp->base.base.dev; 315 struct drm_i915_private *dev_priv = dev->dev_private; 316 317 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; 318} 319 320static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 321{ 322 struct drm_device *dev = intel_dp->base.base.dev; 323 struct drm_i915_private *dev_priv = dev->dev_private; 324 325 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; 326} 327 328static void 329intel_dp_check_edp(struct intel_dp *intel_dp) 
330{ 331 struct drm_device *dev = intel_dp->base.base.dev; 332 struct drm_i915_private *dev_priv = dev->dev_private; 333 334 if (!is_edp(intel_dp)) 335 return; 336 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 337 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 338 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 339 I915_READ(PCH_PP_STATUS), 340 I915_READ(PCH_PP_CONTROL)); 341 } 342} 343 344static int 345intel_dp_aux_ch(struct intel_dp *intel_dp, 346 uint8_t *send, int send_bytes, 347 uint8_t *recv, int recv_size) 348{ 349 uint32_t output_reg = intel_dp->output_reg; 350 struct drm_device *dev = intel_dp->base.base.dev; 351 struct drm_i915_private *dev_priv = dev->dev_private; 352 uint32_t ch_ctl = output_reg + 0x10; 353 uint32_t ch_data = ch_ctl + 4; 354 int i; 355 int recv_bytes; 356 uint32_t status; 357 uint32_t aux_clock_divider; 358 int try, precharge; 359 360 intel_dp_check_edp(intel_dp); 361 /* The clock divider is based off the hrawclk, 362 * and would like to run at 2MHz. So, take the 363 * hrawclk value and divide by 2 and use that 364 * 365 * Note that PCH attached eDP panels should use a 125MHz input 366 * clock divider. 
367 */ 368 if (is_cpu_edp(intel_dp)) { 369 if (IS_GEN6(dev) || IS_GEN7(dev)) 370 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 371 else 372 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 373 } else if (HAS_PCH_SPLIT(dev)) 374 aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ 375 else 376 aux_clock_divider = intel_hrawclk(dev) / 2; 377 378 if (IS_GEN6(dev)) 379 precharge = 3; 380 else 381 precharge = 5; 382 383 /* Try to wait for any previous AUX channel activity */ 384 for (try = 0; try < 3; try++) { 385 status = I915_READ(ch_ctl); 386 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 387 break; 388 msleep(1); 389 } 390 391 if (try == 3) { 392 WARN(1, "dp_aux_ch not started status 0x%08x\n", 393 I915_READ(ch_ctl)); 394 return -EBUSY; 395 } 396 397 /* Must try at least 3 times according to DP spec */ 398 for (try = 0; try < 5; try++) { 399 /* Load the send data into the aux channel data registers */ 400 for (i = 0; i < send_bytes; i += 4) 401 I915_WRITE(ch_data + i, 402 pack_aux(send + i, send_bytes - i)); 403 404 /* Send the command and wait for it to complete */ 405 I915_WRITE(ch_ctl, 406 DP_AUX_CH_CTL_SEND_BUSY | 407 DP_AUX_CH_CTL_TIME_OUT_400us | 408 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 409 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 410 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 411 DP_AUX_CH_CTL_DONE | 412 DP_AUX_CH_CTL_TIME_OUT_ERROR | 413 DP_AUX_CH_CTL_RECEIVE_ERROR); 414 for (;;) { 415 status = I915_READ(ch_ctl); 416 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 417 break; 418 udelay(100); 419 } 420 421 /* Clear done status and any errors */ 422 I915_WRITE(ch_ctl, 423 status | 424 DP_AUX_CH_CTL_DONE | 425 DP_AUX_CH_CTL_TIME_OUT_ERROR | 426 DP_AUX_CH_CTL_RECEIVE_ERROR); 427 428 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 429 DP_AUX_CH_CTL_RECEIVE_ERROR)) 430 continue; 431 if (status & DP_AUX_CH_CTL_DONE) 432 break; 433 } 434 435 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 436 
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 437 return -EBUSY; 438 } 439 440 /* Check for timeout or receive error. 441 * Timeouts occur when the sink is not connected 442 */ 443 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 444 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 445 return -EIO; 446 } 447 448 /* Timeouts occur when the device isn't connected, so they're 449 * "normal" -- don't fill the kernel log with these */ 450 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 451 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 452 return -ETIMEDOUT; 453 } 454 455 /* Unload any bytes sent back from the other side */ 456 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 457 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 458 if (recv_bytes > recv_size) 459 recv_bytes = recv_size; 460 461 for (i = 0; i < recv_bytes; i += 4) 462 unpack_aux(I915_READ(ch_data + i), 463 recv + i, recv_bytes - i); 464 465 return recv_bytes; 466} 467 468/* Write data to the aux channel in native mode */ 469static int 470intel_dp_aux_native_write(struct intel_dp *intel_dp, 471 uint16_t address, uint8_t *send, int send_bytes) 472{ 473 int ret; 474 uint8_t msg[20]; 475 int msg_bytes; 476 uint8_t ack; 477 478 intel_dp_check_edp(intel_dp); 479 if (send_bytes > 16) 480 return -1; 481 msg[0] = AUX_NATIVE_WRITE << 4; 482 msg[1] = address >> 8; 483 msg[2] = address & 0xff; 484 msg[3] = send_bytes - 1; 485 memcpy(&msg[4], send, send_bytes); 486 msg_bytes = send_bytes + 4; 487 for (;;) { 488 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 489 if (ret < 0) 490 return ret; 491 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 492 break; 493 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 494 udelay(100); 495 else 496 return -EIO; 497 } 498 return send_bytes; 499} 500 501/* Write a single byte to the aux channel in native mode */ 502static int 503intel_dp_aux_native_write_1(struct intel_dp *intel_dp, 504 uint16_t address, uint8_t byte) 
505{ 506 return intel_dp_aux_native_write(intel_dp, address, &byte, 1); 507} 508 509/* read bytes from a native aux channel */ 510static int 511intel_dp_aux_native_read(struct intel_dp *intel_dp, 512 uint16_t address, uint8_t *recv, int recv_bytes) 513{ 514 uint8_t msg[4]; 515 int msg_bytes; 516 uint8_t reply[20]; 517 int reply_bytes; 518 uint8_t ack; 519 int ret; 520 521 intel_dp_check_edp(intel_dp); 522 msg[0] = AUX_NATIVE_READ << 4; 523 msg[1] = address >> 8; 524 msg[2] = address & 0xff; 525 msg[3] = recv_bytes - 1; 526 527 msg_bytes = 4; 528 reply_bytes = recv_bytes + 1; 529 530 for (;;) { 531 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 532 reply, reply_bytes); 533 if (ret == 0) 534 return -EPROTO; 535 if (ret < 0) 536 return ret; 537 ack = reply[0]; 538 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { 539 memcpy(recv, reply + 1, ret - 1); 540 return ret - 1; 541 } 542 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 543 udelay(100); 544 else 545 return -EIO; 546 } 547} 548 549static int 550intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 551 uint8_t write_byte, uint8_t *read_byte) 552{ 553 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 554 struct intel_dp *intel_dp = container_of(adapter, 555 struct intel_dp, 556 adapter); 557 uint16_t address = algo_data->address; 558 uint8_t msg[5]; 559 uint8_t reply[2]; 560 unsigned retry; 561 int msg_bytes; 562 int reply_bytes; 563 int ret; 564 565 intel_dp_check_edp(intel_dp); 566 /* Set up the command byte */ 567 if (mode & MODE_I2C_READ) 568 msg[0] = AUX_I2C_READ << 4; 569 else 570 msg[0] = AUX_I2C_WRITE << 4; 571 572 if (!(mode & MODE_I2C_STOP)) 573 msg[0] |= AUX_I2C_MOT << 4; 574 575 msg[1] = address >> 8; 576 msg[2] = address; 577 578 switch (mode) { 579 case MODE_I2C_WRITE: 580 msg[3] = 0; 581 msg[4] = write_byte; 582 msg_bytes = 5; 583 reply_bytes = 1; 584 break; 585 case MODE_I2C_READ: 586 msg[3] = 0; 587 msg_bytes = 4; 588 reply_bytes = 2; 589 break; 590 
default: 591 msg_bytes = 3; 592 reply_bytes = 1; 593 break; 594 } 595 596 for (retry = 0; retry < 5; retry++) { 597 ret = intel_dp_aux_ch(intel_dp, 598 msg, msg_bytes, 599 reply, reply_bytes); 600 if (ret < 0) { 601 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 602 return ret; 603 } 604 605 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 606 case AUX_NATIVE_REPLY_ACK: 607 /* I2C-over-AUX Reply field is only valid 608 * when paired with AUX ACK. 609 */ 610 break; 611 case AUX_NATIVE_REPLY_NACK: 612 DRM_DEBUG_KMS("aux_ch native nack\n"); 613 return -EREMOTEIO; 614 case AUX_NATIVE_REPLY_DEFER: 615 udelay(100); 616 continue; 617 default: 618 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 619 reply[0]); 620 return -EREMOTEIO; 621 } 622 623 switch (reply[0] & AUX_I2C_REPLY_MASK) { 624 case AUX_I2C_REPLY_ACK: 625 if (mode == MODE_I2C_READ) { 626 *read_byte = reply[1]; 627 } 628 return reply_bytes - 1; 629 case AUX_I2C_REPLY_NACK: 630 DRM_DEBUG_KMS("aux_i2c nack\n"); 631 return -EREMOTEIO; 632 case AUX_I2C_REPLY_DEFER: 633 DRM_DEBUG_KMS("aux_i2c defer\n"); 634 udelay(100); 635 break; 636 default: 637 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 638 return -EREMOTEIO; 639 } 640 } 641 642 DRM_ERROR("too many retries, giving up\n"); 643 return -EREMOTEIO; 644} 645 646static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); 647static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 648 649static int 650intel_dp_i2c_init(struct intel_dp *intel_dp, 651 struct intel_connector *intel_connector, const char *name) 652{ 653 int ret; 654 655 DRM_DEBUG_KMS("i2c_init %s\n", name); 656 intel_dp->algo.running = false; 657 intel_dp->algo.address = 0; 658 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; 659 660 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); 661 intel_dp->adapter.owner = THIS_MODULE; 662 intel_dp->adapter.class = I2C_CLASS_DDC; 663 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 664 
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 665 intel_dp->adapter.algo_data = &intel_dp->algo; 666 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 667 668 ironlake_edp_panel_vdd_on(intel_dp); 669 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 670 ironlake_edp_panel_vdd_off(intel_dp, false); 671 return ret; 672} 673 674static bool 675intel_dp_mode_fixup(struct drm_encoder *encoder, 676 const struct drm_display_mode *mode, 677 struct drm_display_mode *adjusted_mode) 678{ 679 struct drm_device *dev = encoder->dev; 680 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 681 int lane_count, clock; 682 int max_lane_count = intel_dp_max_lane_count(intel_dp); 683 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 684 int bpp, mode_rate; 685 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 686 687 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 688 intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); 689 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, 690 mode, adjusted_mode); 691 } 692 693 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 694 return false; 695 696 DRM_DEBUG_KMS("DP link computation with max lane count %i " 697 "max bw %02x pixel clock %iKHz\n", 698 max_lane_count, bws[max_clock], adjusted_mode->clock); 699 700 if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) 701 return false; 702 703 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 
18 : 24; 704 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 705 706 for (clock = 0; clock <= max_clock; clock++) { 707 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 708 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 709 710 if (mode_rate <= link_avail) { 711 intel_dp->link_bw = bws[clock]; 712 intel_dp->lane_count = lane_count; 713 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 714 DRM_DEBUG_KMS("DP link bw %02x lane " 715 "count %d clock %d bpp %d\n", 716 intel_dp->link_bw, intel_dp->lane_count, 717 adjusted_mode->clock, bpp); 718 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 719 mode_rate, link_avail); 720 return true; 721 } 722 } 723 } 724 725 return false; 726} 727 728struct intel_dp_m_n { 729 uint32_t tu; 730 uint32_t gmch_m; 731 uint32_t gmch_n; 732 uint32_t link_m; 733 uint32_t link_n; 734}; 735 736static void 737intel_reduce_ratio(uint32_t *num, uint32_t *den) 738{ 739 while (*num > 0xffffff || *den > 0xffffff) { 740 *num >>= 1; 741 *den >>= 1; 742 } 743} 744 745static void 746intel_dp_compute_m_n(int bpp, 747 int nlanes, 748 int pixel_clock, 749 int link_clock, 750 struct intel_dp_m_n *m_n) 751{ 752 m_n->tu = 64; 753 m_n->gmch_m = (pixel_clock * bpp) >> 3; 754 m_n->gmch_n = link_clock * nlanes; 755 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 756 m_n->link_m = pixel_clock; 757 m_n->link_n = link_clock; 758 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 759} 760 761void 762intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 763 struct drm_display_mode *adjusted_mode) 764{ 765 struct drm_device *dev = crtc->dev; 766 struct intel_encoder *encoder; 767 struct drm_i915_private *dev_priv = dev->dev_private; 768 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 769 int lane_count = 4; 770 struct intel_dp_m_n m_n; 771 int pipe = intel_crtc->pipe; 772 773 /* 774 * Find the lane count in the intel_encoder private 775 */ 776 
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	/* PCH platforms program the transcoder M/N registers; everything
	 * else uses the pipe M/N registers. */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe),
			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
			   m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
			   m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

/* encoder->helper mode_set: compute the port control word in
 * intel_dp->DP and the DPCD link_configuration block for the chosen
 * link settings.  The register itself is written later during enable. */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	/* Lane count was validated to be 1, 2 or 4 by mode_fixup. */
	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}
	/* Build the DPCD link configuration written during link training. */
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		/* IVB CPU eDP: sync polarity, pipe select at bit 29, PLL freq. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		/* IBX PCH / pre-IVB CPU layout. */
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		/* CPT PCH: most configuration lives in TRANS_DP_CTL. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

/* Panel-power-sequencer state matchers for ironlake_wait_panel_status(). */
#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/* Poll PCH_PP_STATUS (up to 5s) until (status & mask) == value. */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

static void
ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	/* Replace the write-protect key so subsequent writes stick. */
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/* Force panel VDD on so AUX/EDID traffic works before the panel is fully
 * powered.  Callers must pair this with ironlake_edp_panel_vdd_off(). */
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Respect the panel's minimum power-cycle time before re-powering. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/* Actually drop the forced VDD, provided no one still wants it. */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Delayed-work callback used for the asynchronous vdd_off path. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp->base.base.dev;

	/* Serialize against modeset paths touching the power sequencer. */
	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/* Release a VDD reference; either drop it now (@sync) or lazily via
 * delayed work so bursts of AUX traffic don't cycle VDD repeatedly. */
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* The write above already dropped EDP_FORCE_VDD; keep the software
	 * refcount in agreement. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}

static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	/* Let the backlight actually drain before panel power goes away. */
	msleep(intel_dp->backlight_off_delay);
}

/* Enable the CPU eDP PLL (DP_A); the pipe must still be disabled. */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

/* Disable the CPU eDP PLL; the pipe must already be disabled. */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
1249 */ 1250 for (i = 0; i < 3; i++) { 1251 ret = intel_dp_aux_native_write_1(intel_dp, 1252 DP_SET_POWER, 1253 DP_SET_POWER_D0); 1254 if (ret == 1) 1255 break; 1256 msleep(1); 1257 } 1258 } 1259} 1260 1261static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 1262 enum pipe *pipe) 1263{ 1264 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1265 struct drm_device *dev = encoder->base.dev; 1266 struct drm_i915_private *dev_priv = dev->dev_private; 1267 u32 tmp = I915_READ(intel_dp->output_reg); 1268 1269 if (!(tmp & DP_PORT_EN)) 1270 return false; 1271 1272 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { 1273 *pipe = PORT_TO_PIPE_CPT(tmp); 1274 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 1275 *pipe = PORT_TO_PIPE(tmp); 1276 } else { 1277 u32 trans_sel; 1278 u32 trans_dp; 1279 int i; 1280 1281 switch (intel_dp->output_reg) { 1282 case PCH_DP_B: 1283 trans_sel = TRANS_DP_PORT_SEL_B; 1284 break; 1285 case PCH_DP_C: 1286 trans_sel = TRANS_DP_PORT_SEL_C; 1287 break; 1288 case PCH_DP_D: 1289 trans_sel = TRANS_DP_PORT_SEL_D; 1290 break; 1291 default: 1292 return true; 1293 } 1294 1295 for_each_pipe(i) { 1296 trans_dp = I915_READ(TRANS_DP_CTL(i)); 1297 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { 1298 *pipe = i; 1299 return true; 1300 } 1301 } 1302 } 1303 1304 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); 1305 1306 return true; 1307} 1308 1309static void intel_disable_dp(struct intel_encoder *encoder) 1310{ 1311 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1312 1313 /* Make sure the panel is off before trying to change the mode. But also 1314 * ensure that we have vdd while we switch off the panel. */ 1315 ironlake_edp_panel_vdd_on(intel_dp); 1316 ironlake_edp_backlight_off(intel_dp); 1317 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1318 ironlake_edp_panel_off(intel_dp); 1319 1320 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. 
*/ 1321 if (!is_cpu_edp(intel_dp)) 1322 intel_dp_link_down(intel_dp); 1323} 1324 1325static void intel_post_disable_dp(struct intel_encoder *encoder) 1326{ 1327 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1328 1329 if (is_cpu_edp(intel_dp)) { 1330 intel_dp_link_down(intel_dp); 1331 ironlake_edp_pll_off(intel_dp); 1332 } 1333} 1334 1335static void intel_enable_dp(struct intel_encoder *encoder) 1336{ 1337 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1338 struct drm_device *dev = encoder->base.dev; 1339 struct drm_i915_private *dev_priv = dev->dev_private; 1340 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1341 1342 if (WARN_ON(dp_reg & DP_PORT_EN)) 1343 return; 1344 1345 ironlake_edp_panel_vdd_on(intel_dp); 1346 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1347 intel_dp_start_link_train(intel_dp); 1348 ironlake_edp_panel_on(intel_dp); 1349 ironlake_edp_panel_vdd_off(intel_dp, true); 1350 intel_dp_complete_link_train(intel_dp); 1351 ironlake_edp_backlight_on(intel_dp); 1352} 1353 1354static void intel_pre_enable_dp(struct intel_encoder *encoder) 1355{ 1356 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1357 1358 if (is_cpu_edp(intel_dp)) 1359 ironlake_edp_pll_on(intel_dp); 1360} 1361 1362/* 1363 * Native read with retry for link status and receiver capability reads for 1364 * cases where the sink may still be asleep. 1365 */ 1366static bool 1367intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1368 uint8_t *recv, int recv_bytes) 1369{ 1370 int ret, i; 1371 1372 /* 1373 * Sinks are *supposed* to come up within 1ms from an off state, 1374 * but we're also supposed to retry 3 times per the spec. 
1375 */ 1376 for (i = 0; i < 3; i++) { 1377 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1378 recv_bytes); 1379 if (ret == recv_bytes) 1380 return true; 1381 msleep(1); 1382 } 1383 1384 return false; 1385} 1386 1387/* 1388 * Fetch AUX CH registers 0x202 - 0x207 which contain 1389 * link status information 1390 */ 1391static bool 1392intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1393{ 1394 return intel_dp_aux_native_read_retry(intel_dp, 1395 DP_LANE0_1_STATUS, 1396 link_status, 1397 DP_LINK_STATUS_SIZE); 1398} 1399 1400static uint8_t 1401intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1402 int r) 1403{ 1404 return link_status[r - DP_LANE0_1_STATUS]; 1405} 1406 1407static uint8_t 1408intel_get_adjust_request_voltage(uint8_t adjust_request[2], 1409 int lane) 1410{ 1411 int s = ((lane & 1) ? 1412 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 1413 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); 1414 uint8_t l = adjust_request[lane>>1]; 1415 1416 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 1417} 1418 1419static uint8_t 1420intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], 1421 int lane) 1422{ 1423 int s = ((lane & 1) ? 
1424 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : 1425 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); 1426 uint8_t l = adjust_request[lane>>1]; 1427 1428 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 1429} 1430 1431 1432#if 0 1433static char *voltage_names[] = { 1434 "0.4V", "0.6V", "0.8V", "1.2V" 1435}; 1436static char *pre_emph_names[] = { 1437 "0dB", "3.5dB", "6dB", "9.5dB" 1438}; 1439static char *link_train_names[] = { 1440 "pattern 1", "pattern 2", "idle", "off" 1441}; 1442#endif 1443 1444/* 1445 * These are source-specific values; current Intel hardware supports 1446 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1447 */ 1448 1449static uint8_t 1450intel_dp_voltage_max(struct intel_dp *intel_dp) 1451{ 1452 struct drm_device *dev = intel_dp->base.base.dev; 1453 1454 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1455 return DP_TRAIN_VOLTAGE_SWING_800; 1456 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1457 return DP_TRAIN_VOLTAGE_SWING_1200; 1458 else 1459 return DP_TRAIN_VOLTAGE_SWING_800; 1460} 1461 1462static uint8_t 1463intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1464{ 1465 struct drm_device *dev = intel_dp->base.base.dev; 1466 1467 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1468 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1469 case DP_TRAIN_VOLTAGE_SWING_400: 1470 return DP_TRAIN_PRE_EMPHASIS_6; 1471 case DP_TRAIN_VOLTAGE_SWING_600: 1472 case DP_TRAIN_VOLTAGE_SWING_800: 1473 return DP_TRAIN_PRE_EMPHASIS_3_5; 1474 default: 1475 return DP_TRAIN_PRE_EMPHASIS_0; 1476 } 1477 } else { 1478 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1479 case DP_TRAIN_VOLTAGE_SWING_400: 1480 return DP_TRAIN_PRE_EMPHASIS_6; 1481 case DP_TRAIN_VOLTAGE_SWING_600: 1482 return DP_TRAIN_PRE_EMPHASIS_6; 1483 case DP_TRAIN_VOLTAGE_SWING_800: 1484 return DP_TRAIN_PRE_EMPHASIS_3_5; 1485 case DP_TRAIN_VOLTAGE_SWING_1200: 1486 default: 1487 return DP_TRAIN_PRE_EMPHASIS_0; 1488 } 1489 } 1490} 1491 1492static void 
1493intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1494{ 1495 uint8_t v = 0; 1496 uint8_t p = 0; 1497 int lane; 1498 uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); 1499 uint8_t voltage_max; 1500 uint8_t preemph_max; 1501 1502 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1503 uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); 1504 uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); 1505 1506 if (this_v > v) 1507 v = this_v; 1508 if (this_p > p) 1509 p = this_p; 1510 } 1511 1512 voltage_max = intel_dp_voltage_max(intel_dp); 1513 if (v >= voltage_max) 1514 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 1515 1516 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 1517 if (p >= preemph_max) 1518 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1519 1520 for (lane = 0; lane < 4; lane++) 1521 intel_dp->train_set[lane] = v | p; 1522} 1523 1524static uint32_t 1525intel_dp_signal_levels(uint8_t train_set) 1526{ 1527 uint32_t signal_levels = 0; 1528 1529 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1530 case DP_TRAIN_VOLTAGE_SWING_400: 1531 default: 1532 signal_levels |= DP_VOLTAGE_0_4; 1533 break; 1534 case DP_TRAIN_VOLTAGE_SWING_600: 1535 signal_levels |= DP_VOLTAGE_0_6; 1536 break; 1537 case DP_TRAIN_VOLTAGE_SWING_800: 1538 signal_levels |= DP_VOLTAGE_0_8; 1539 break; 1540 case DP_TRAIN_VOLTAGE_SWING_1200: 1541 signal_levels |= DP_VOLTAGE_1_2; 1542 break; 1543 } 1544 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1545 case DP_TRAIN_PRE_EMPHASIS_0: 1546 default: 1547 signal_levels |= DP_PRE_EMPHASIS_0; 1548 break; 1549 case DP_TRAIN_PRE_EMPHASIS_3_5: 1550 signal_levels |= DP_PRE_EMPHASIS_3_5; 1551 break; 1552 case DP_TRAIN_PRE_EMPHASIS_6: 1553 signal_levels |= DP_PRE_EMPHASIS_6; 1554 break; 1555 case DP_TRAIN_PRE_EMPHASIS_9_5: 1556 signal_levels |= DP_PRE_EMPHASIS_9_5; 1557 break; 1558 } 1559 return signal_levels; 
1560} 1561 1562/* Gen6's DP voltage swing and pre-emphasis control */ 1563static uint32_t 1564intel_gen6_edp_signal_levels(uint8_t train_set) 1565{ 1566 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1567 DP_TRAIN_PRE_EMPHASIS_MASK); 1568 switch (signal_levels) { 1569 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1570 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1571 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1572 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1573 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 1574 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1575 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1576 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 1577 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1578 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1579 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 1580 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1581 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 1582 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 1583 default: 1584 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1585 "0x%x\n", signal_levels); 1586 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1587 } 1588} 1589 1590/* Gen7's DP voltage swing and pre-emphasis control */ 1591static uint32_t 1592intel_gen7_edp_signal_levels(uint8_t train_set) 1593{ 1594 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1595 DP_TRAIN_PRE_EMPHASIS_MASK); 1596 switch (signal_levels) { 1597 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1598 return EDP_LINK_TRAIN_400MV_0DB_IVB; 1599 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1600 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 1601 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1602 return EDP_LINK_TRAIN_400MV_6DB_IVB; 1603 1604 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1605 return EDP_LINK_TRAIN_600MV_0DB_IVB; 1606 case 
DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1607 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 1608 1609 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1610 return EDP_LINK_TRAIN_800MV_0DB_IVB; 1611 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1612 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 1613 1614 default: 1615 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1616 "0x%x\n", signal_levels); 1617 return EDP_LINK_TRAIN_500MV_0DB_IVB; 1618 } 1619} 1620 1621static uint8_t 1622intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1623 int lane) 1624{ 1625 int s = (lane & 1) * 4; 1626 uint8_t l = link_status[lane>>1]; 1627 1628 return (l >> s) & 0xf; 1629} 1630 1631/* Check for clock recovery is done on all channels */ 1632static bool 1633intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) 1634{ 1635 int lane; 1636 uint8_t lane_status; 1637 1638 for (lane = 0; lane < lane_count; lane++) { 1639 lane_status = intel_get_lane_status(link_status, lane); 1640 if ((lane_status & DP_LANE_CR_DONE) == 0) 1641 return false; 1642 } 1643 return true; 1644} 1645 1646/* Check to see if channel eq is done on all channels */ 1647#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ 1648 DP_LANE_CHANNEL_EQ_DONE|\ 1649 DP_LANE_SYMBOL_LOCKED) 1650static bool 1651intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1652{ 1653 uint8_t lane_align; 1654 uint8_t lane_status; 1655 int lane; 1656 1657 lane_align = intel_dp_link_status(link_status, 1658 DP_LANE_ALIGN_STATUS_UPDATED); 1659 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1660 return false; 1661 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1662 lane_status = intel_get_lane_status(link_status, lane); 1663 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1664 return false; 1665 } 1666 return true; 1667} 1668 1669static bool 1670intel_dp_set_link_train(struct intel_dp *intel_dp, 1671 uint32_t dp_reg_value, 1672 
uint8_t dp_train_pat) 1673{ 1674 struct drm_device *dev = intel_dp->base.base.dev; 1675 struct drm_i915_private *dev_priv = dev->dev_private; 1676 int ret; 1677 1678 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1679 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 1680 1681 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1682 case DP_TRAINING_PATTERN_DISABLE: 1683 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 1684 break; 1685 case DP_TRAINING_PATTERN_1: 1686 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 1687 break; 1688 case DP_TRAINING_PATTERN_2: 1689 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1690 break; 1691 case DP_TRAINING_PATTERN_3: 1692 DRM_ERROR("DP training pattern 3 not supported\n"); 1693 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1694 break; 1695 } 1696 1697 } else { 1698 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 1699 1700 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1701 case DP_TRAINING_PATTERN_DISABLE: 1702 dp_reg_value |= DP_LINK_TRAIN_OFF; 1703 break; 1704 case DP_TRAINING_PATTERN_1: 1705 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 1706 break; 1707 case DP_TRAINING_PATTERN_2: 1708 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1709 break; 1710 case DP_TRAINING_PATTERN_3: 1711 DRM_ERROR("DP training pattern 3 not supported\n"); 1712 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1713 break; 1714 } 1715 } 1716 1717 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1718 POSTING_READ(intel_dp->output_reg); 1719 1720 intel_dp_aux_native_write_1(intel_dp, 1721 DP_TRAINING_PATTERN_SET, 1722 dp_train_pat); 1723 1724 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != 1725 DP_TRAINING_PATTERN_DISABLE) { 1726 ret = intel_dp_aux_native_write(intel_dp, 1727 DP_TRAINING_LANE0_SET, 1728 intel_dp->train_set, 1729 intel_dp->lane_count); 1730 if (ret != intel_dp->lane_count) 1731 return false; 1732 } 1733 1734 return true; 1735} 1736 1737/* Enable corresponding port and start training pattern 1 */ 1738static void 1739intel_dp_start_link_train(struct intel_dp *intel_dp) 1740{ 1741 struct 
drm_device *dev = intel_dp->base.base.dev; 1742 int i; 1743 uint8_t voltage; 1744 bool clock_recovery = false; 1745 int voltage_tries, loop_tries; 1746 uint32_t DP = intel_dp->DP; 1747 1748 /* Write the link configuration data */ 1749 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1750 intel_dp->link_configuration, 1751 DP_LINK_CONFIGURATION_SIZE); 1752 1753 DP |= DP_PORT_EN; 1754 1755 memset(intel_dp->train_set, 0, 4); 1756 voltage = 0xff; 1757 voltage_tries = 0; 1758 loop_tries = 0; 1759 clock_recovery = false; 1760 for (;;) { 1761 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1762 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1763 uint32_t signal_levels; 1764 1765 1766 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1767 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1768 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1769 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1770 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1771 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1772 } else { 1773 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1774 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); 1775 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1776 } 1777 1778 if (!intel_dp_set_link_train(intel_dp, DP, 1779 DP_TRAINING_PATTERN_1 | 1780 DP_LINK_SCRAMBLING_DISABLE)) 1781 break; 1782 /* Set training pattern 1 */ 1783 1784 udelay(100); 1785 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1786 DRM_ERROR("failed to get link status\n"); 1787 break; 1788 } 1789 1790 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1791 DRM_DEBUG_KMS("clock recovery OK\n"); 1792 clock_recovery = true; 1793 break; 1794 } 1795 1796 /* Check to see if we've tried the max voltage */ 1797 for (i = 0; i < intel_dp->lane_count; i++) 1798 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1799 
break; 1800 if (i == intel_dp->lane_count && voltage_tries == 5) { 1801 ++loop_tries; 1802 if (loop_tries == 5) { 1803 DRM_DEBUG_KMS("too many full retries, give up\n"); 1804 break; 1805 } 1806 memset(intel_dp->train_set, 0, 4); 1807 voltage_tries = 0; 1808 continue; 1809 } 1810 1811 /* Check to see if we've tried the same voltage 5 times */ 1812 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1813 ++voltage_tries; 1814 if (voltage_tries == 5) { 1815 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 1816 break; 1817 } 1818 } else 1819 voltage_tries = 0; 1820 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1821 1822 /* Compute new intel_dp->train_set as requested by target */ 1823 intel_get_adjust_train(intel_dp, link_status); 1824 } 1825 1826 intel_dp->DP = DP; 1827} 1828 1829static void 1830intel_dp_complete_link_train(struct intel_dp *intel_dp) 1831{ 1832 struct drm_device *dev = intel_dp->base.base.dev; 1833 bool channel_eq = false; 1834 int tries, cr_tries; 1835 uint32_t DP = intel_dp->DP; 1836 1837 /* channel equalization */ 1838 tries = 0; 1839 cr_tries = 0; 1840 channel_eq = false; 1841 for (;;) { 1842 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1843 uint32_t signal_levels; 1844 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1845 1846 if (cr_tries > 5) { 1847 DRM_ERROR("failed to train DP, aborting\n"); 1848 intel_dp_link_down(intel_dp); 1849 break; 1850 } 1851 1852 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1853 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1854 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1855 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1856 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1857 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1858 } else { 1859 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1860 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | 
signal_levels; 1861 } 1862 1863 /* channel eq pattern */ 1864 if (!intel_dp_set_link_train(intel_dp, DP, 1865 DP_TRAINING_PATTERN_2 | 1866 DP_LINK_SCRAMBLING_DISABLE)) 1867 break; 1868 1869 udelay(400); 1870 if (!intel_dp_get_link_status(intel_dp, link_status)) 1871 break; 1872 1873 /* Make sure clock is still ok */ 1874 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1875 intel_dp_start_link_train(intel_dp); 1876 cr_tries++; 1877 continue; 1878 } 1879 1880 if (intel_channel_eq_ok(intel_dp, link_status)) { 1881 channel_eq = true; 1882 break; 1883 } 1884 1885 /* Try 5 times, then try clock recovery if that fails */ 1886 if (tries > 5) { 1887 intel_dp_link_down(intel_dp); 1888 intel_dp_start_link_train(intel_dp); 1889 tries = 0; 1890 cr_tries++; 1891 continue; 1892 } 1893 1894 /* Compute new intel_dp->train_set as requested by target */ 1895 intel_get_adjust_train(intel_dp, link_status); 1896 ++tries; 1897 } 1898 1899 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1900} 1901 1902static void 1903intel_dp_link_down(struct intel_dp *intel_dp) 1904{ 1905 struct drm_device *dev = intel_dp->base.base.dev; 1906 struct drm_i915_private *dev_priv = dev->dev_private; 1907 uint32_t DP = intel_dp->DP; 1908 1909 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 1910 return; 1911 1912 DRM_DEBUG_KMS("\n"); 1913 1914 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1915 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1916 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1917 } else { 1918 DP &= ~DP_LINK_TRAIN_MASK; 1919 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1920 } 1921 POSTING_READ(intel_dp->output_reg); 1922 1923 msleep(17); 1924 1925 if (HAS_PCH_IBX(dev) && 1926 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1927 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1928 1929 /* Hardware workaround: leaving our transcoder select 1930 * set to transcoder B while it's off will 
prevent the 1931 * corresponding HDMI output on transcoder A. 1932 * 1933 * Combine this with another hardware workaround: 1934 * transcoder select bit can only be cleared while the 1935 * port is enabled. 1936 */ 1937 DP &= ~DP_PIPEB_SELECT; 1938 I915_WRITE(intel_dp->output_reg, DP); 1939 1940 /* Changes to enable or select take place the vblank 1941 * after being written. 1942 */ 1943 if (crtc == NULL) { 1944 /* We can arrive here never having been attached 1945 * to a CRTC, for instance, due to inheriting 1946 * random state from the BIOS. 1947 * 1948 * If the pipe is not running, play safe and 1949 * wait for the clocks to stabilise before 1950 * continuing. 1951 */ 1952 POSTING_READ(intel_dp->output_reg); 1953 msleep(50); 1954 } else 1955 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 1956 } 1957 1958 DP &= ~DP_AUDIO_OUTPUT_ENABLE; 1959 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1960 POSTING_READ(intel_dp->output_reg); 1961 msleep(intel_dp->panel_power_down_delay); 1962} 1963 1964static bool 1965intel_dp_get_dpcd(struct intel_dp *intel_dp) 1966{ 1967 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 1968 sizeof(intel_dp->dpcd)) == 0) 1969 return false; /* aux transfer failed */ 1970 1971 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 1972 return false; /* DPCD not present */ 1973 1974 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 1975 DP_DWN_STRM_PORT_PRESENT)) 1976 return true; /* native DP sink */ 1977 1978 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 1979 return true; /* no per-port downstream info */ 1980 1981 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, 1982 intel_dp->downstream_ports, 1983 DP_MAX_DOWNSTREAM_PORTS) == 0) 1984 return false; /* downstream port status fetch failed */ 1985 1986 return true; 1987} 1988 1989static void 1990intel_dp_probe_oui(struct intel_dp *intel_dp) 1991{ 1992 u8 buf[3]; 1993 1994 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 1995 return; 1996 1997 
ironlake_edp_panel_vdd_on(intel_dp); 1998 1999 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 2000 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 2001 buf[0], buf[1], buf[2]); 2002 2003 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) 2004 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 2005 buf[0], buf[1], buf[2]); 2006 2007 ironlake_edp_panel_vdd_off(intel_dp, false); 2008} 2009 2010static bool 2011intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) 2012{ 2013 int ret; 2014 2015 ret = intel_dp_aux_native_read_retry(intel_dp, 2016 DP_DEVICE_SERVICE_IRQ_VECTOR, 2017 sink_irq_vector, 1); 2018 if (!ret) 2019 return false; 2020 2021 return true; 2022} 2023 2024static void 2025intel_dp_handle_test_request(struct intel_dp *intel_dp) 2026{ 2027 /* NAK by default */ 2028 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); 2029} 2030 2031/* 2032 * According to DP spec 2033 * 5.1.2: 2034 * 1. Read DPCD 2035 * 2. Configure link according to Receiver Capabilities 2036 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 2037 * 4. 
Check link status on receipt of hot-plug interrupt 2038 */ 2039 2040static void 2041intel_dp_check_link_status(struct intel_dp *intel_dp) 2042{ 2043 u8 sink_irq_vector; 2044 u8 link_status[DP_LINK_STATUS_SIZE]; 2045 2046 if (!intel_dp->base.connectors_active) 2047 return; 2048 2049 if (WARN_ON(!intel_dp->base.base.crtc)) 2050 return; 2051 2052 /* Try to read receiver status if the link appears to be up */ 2053 if (!intel_dp_get_link_status(intel_dp, link_status)) { 2054 intel_dp_link_down(intel_dp); 2055 return; 2056 } 2057 2058 /* Now read the DPCD to see if it's actually running */ 2059 if (!intel_dp_get_dpcd(intel_dp)) { 2060 intel_dp_link_down(intel_dp); 2061 return; 2062 } 2063 2064 /* Try to read the source of the interrupt */ 2065 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 2066 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { 2067 /* Clear interrupt source */ 2068 intel_dp_aux_native_write_1(intel_dp, 2069 DP_DEVICE_SERVICE_IRQ_VECTOR, 2070 sink_irq_vector); 2071 2072 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) 2073 intel_dp_handle_test_request(intel_dp); 2074 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) 2075 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 2076 } 2077 2078 if (!intel_channel_eq_ok(intel_dp, link_status)) { 2079 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 2080 drm_get_encoder_name(&intel_dp->base.base)); 2081 intel_dp_start_link_train(intel_dp); 2082 intel_dp_complete_link_train(intel_dp); 2083 } 2084} 2085 2086/* XXX this is probably wrong for multiple downstream ports */ 2087static enum drm_connector_status 2088intel_dp_detect_dpcd(struct intel_dp *intel_dp) 2089{ 2090 uint8_t *dpcd = intel_dp->dpcd; 2091 bool hpd; 2092 uint8_t type; 2093 2094 if (!intel_dp_get_dpcd(intel_dp)) 2095 return connector_status_disconnected; 2096 2097 /* if there's no downstream port, we're done */ 2098 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) 2099 return connector_status_connected; 2100 2101 /* If 
we're HPD-aware, SINK_COUNT changes dynamically */ 2102 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); 2103 if (hpd) { 2104 uint8_t reg; 2105 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2106 ®, 1)) 2107 return connector_status_unknown; 2108 return DP_GET_SINK_COUNT(reg) ? connector_status_connected 2109 : connector_status_disconnected; 2110 } 2111 2112 /* If no HPD, poke DDC gently */ 2113 if (drm_probe_ddc(&intel_dp->adapter)) 2114 return connector_status_connected; 2115 2116 /* Well we tried, say unknown for unreliable port types */ 2117 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2118 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2119 return connector_status_unknown; 2120 2121 /* Anything else is out of spec, warn and ignore */ 2122 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2123 return connector_status_disconnected; 2124} 2125 2126static enum drm_connector_status 2127ironlake_dp_detect(struct intel_dp *intel_dp) 2128{ 2129 enum drm_connector_status status; 2130 2131 /* Can't disconnect eDP, but you can close the lid... 
	 */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(intel_dp->base.base.dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	return intel_dp_detect_dpcd(intel_dp);
}

/* Detect hook for g4x-class hardware: check the live hotplug status bit. */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit;

	switch (intel_dp->output_reg) {
	case DP_B:
		bit = DPB_HOTPLUG_LIVE_STATUS;
		break;
	case DP_C:
		bit = DPC_HOTPLUG_LIVE_STATUS;
		break;
	case DP_D:
		bit = DPD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/*
 * Return a freshly-allocated copy of the connector's EDID: the cached eDP
 * EDID when present, otherwise a live DDC read. Caller frees.
 */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	int size;

	if (is_edp(intel_dp)) {
		if (!intel_dp->edid)
			return NULL;

		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
		edid = kmalloc(size, GFP_KERNEL);
		if (!edid)
			return NULL;

		memcpy(edid, intel_dp->edid, size);
		return edid;
	}

	edid = drm_get_edid(connector, adapter);
	return edid;
}

/* Populate the connector's mode list from EDID (cached EDID for eDP). */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	if (is_edp(intel_dp)) {
		drm_mode_connector_update_edid_property(connector,
							intel_dp->edid);
		ret = drm_add_edid_modes(connector, intel_dp->edid);
		drm_edid_to_eld(connector,
				intel_dp->edid);
		/* NOTE(review): 'ret' from drm_add_edid_modes is discarded
		 * here in favour of the cached count — confirm intent. */
		return intel_dp->edid_mode_count;
	}

	ret = intel_ddc_get_modes(connector, adapter);
	return ret;
}


/**
 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
 *
 * \return true if DP port is connected.
 * \return false if DP port is disconnected.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Audio: forced on/off by property, otherwise sniffed from EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	return connector_status_connected;
}

/*
 * Connector get_modes hook. Prefers EDID modes; for eDP with no EDID falls
 * back to the fixed panel mode (possibly seeded from the VBT).
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret) {
		/* Remember the preferred EDID mode as the eDP fixed mode. */
		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
			struct drm_display_mode *newmode;
			list_for_each_entry(newmode, &connector->probed_modes,
					    head) {
				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
					intel_dp->panel_fixed_mode =
						drm_mode_duplicate(dev, newmode);
					break;
				}
			}
		}
		return ret;
	}

	/* if eDP has no EDID, try to use fixed panel mode from VBT */
	if (is_edp(intel_dp)) {
		/* initialize panel mode from VBT if available for eDP */
		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
			intel_dp->panel_fixed_mode =
				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (intel_dp->panel_fixed_mode) {
				intel_dp->panel_fixed_mode->type |=
					DRM_MODE_TYPE_PREFERRED;
			}
		}
		if (intel_dp->panel_fixed_mode) {
			struct drm_display_mode *mode;
			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

/* Re-read the EDID and report whether the monitor advertises audio. */
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

/* Connector property-set hook (force_audio and friends). */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio =
intel_dp_detect_audio(connector); 2344 else 2345 has_audio = (i == HDMI_AUDIO_ON); 2346 2347 if (has_audio == intel_dp->has_audio) 2348 return 0; 2349 2350 intel_dp->has_audio = has_audio; 2351 goto done; 2352 } 2353 2354 if (property == dev_priv->broadcast_rgb_property) { 2355 if (val == !!intel_dp->color_range) 2356 return 0; 2357 2358 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; 2359 goto done; 2360 } 2361 2362 return -EINVAL; 2363 2364done: 2365 if (intel_dp->base.base.crtc) { 2366 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2367 intel_set_mode(crtc, &crtc->mode, 2368 crtc->x, crtc->y, crtc->fb); 2369 } 2370 2371 return 0; 2372} 2373 2374static void 2375intel_dp_destroy(struct drm_connector *connector) 2376{ 2377 struct drm_device *dev = connector->dev; 2378 2379 if (intel_dpd_is_edp(dev)) 2380 intel_panel_destroy_backlight(dev); 2381 2382 drm_sysfs_connector_remove(connector); 2383 drm_connector_cleanup(connector); 2384 kfree(connector); 2385} 2386 2387static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 2388{ 2389 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2390 2391 i2c_del_adapter(&intel_dp->adapter); 2392 drm_encoder_cleanup(encoder); 2393 if (is_edp(intel_dp)) { 2394 kfree(intel_dp->edid); 2395 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2396 ironlake_panel_vdd_off_sync(intel_dp); 2397 } 2398 kfree(intel_dp); 2399} 2400 2401static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2402 .mode_fixup = intel_dp_mode_fixup, 2403 .mode_set = intel_dp_mode_set, 2404 .disable = intel_encoder_noop, 2405}; 2406 2407static const struct drm_connector_funcs intel_dp_connector_funcs = { 2408 .dpms = intel_connector_dpms, 2409 .detect = intel_dp_detect, 2410 .fill_modes = drm_helper_probe_single_connector_modes, 2411 .set_property = intel_dp_set_property, 2412 .destroy = intel_dp_destroy, 2413}; 2414 2415static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 2416 .get_modes = 
intel_dp_get_modes, 2417 .mode_valid = intel_dp_mode_valid, 2418 .best_encoder = intel_best_encoder, 2419}; 2420 2421static const struct drm_encoder_funcs intel_dp_enc_funcs = { 2422 .destroy = intel_dp_encoder_destroy, 2423}; 2424 2425static void 2426intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2427{ 2428 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 2429 2430 intel_dp_check_link_status(intel_dp); 2431} 2432 2433/* Return which DP Port should be selected for Transcoder DP control */ 2434int 2435intel_trans_dp_port_sel(struct drm_crtc *crtc) 2436{ 2437 struct drm_device *dev = crtc->dev; 2438 struct intel_encoder *encoder; 2439 2440 for_each_encoder_on_crtc(dev, crtc, encoder) { 2441 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2442 2443 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 2444 intel_dp->base.type == INTEL_OUTPUT_EDP) 2445 return intel_dp->output_reg; 2446 } 2447 2448 return -1; 2449} 2450 2451/* check the VBT to see whether the eDP is on DP-D port */ 2452bool intel_dpd_is_edp(struct drm_device *dev) 2453{ 2454 struct drm_i915_private *dev_priv = dev->dev_private; 2455 struct child_device_config *p_child; 2456 int i; 2457 2458 if (!dev_priv->child_dev_num) 2459 return false; 2460 2461 for (i = 0; i < dev_priv->child_dev_num; i++) { 2462 p_child = dev_priv->child_dev + i; 2463 2464 if (p_child->dvo_port == PORT_IDPD && 2465 p_child->device_type == DEVICE_TYPE_eDP) 2466 return true; 2467 } 2468 return false; 2469} 2470 2471static void 2472intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 2473{ 2474 intel_attach_force_audio_property(connector); 2475 intel_attach_broadcast_rgb_property(connector); 2476} 2477 2478void 2479intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 2480{ 2481 struct drm_i915_private *dev_priv = dev->dev_private; 2482 struct drm_connector *connector; 2483 struct intel_dp *intel_dp; 2484 struct intel_encoder 
*intel_encoder; 2485 struct intel_connector *intel_connector; 2486 const char *name = NULL; 2487 int type; 2488 2489 intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); 2490 if (!intel_dp) 2491 return; 2492 2493 intel_dp->output_reg = output_reg; 2494 intel_dp->port = port; 2495 /* Preserve the current hw state. */ 2496 intel_dp->DP = I915_READ(intel_dp->output_reg); 2497 2498 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 2499 if (!intel_connector) { 2500 kfree(intel_dp); 2501 return; 2502 } 2503 intel_encoder = &intel_dp->base; 2504 2505 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) 2506 if (intel_dpd_is_edp(dev)) 2507 intel_dp->is_pch_edp = true; 2508 2509 if (output_reg == DP_A || is_pch_edp(intel_dp)) { 2510 type = DRM_MODE_CONNECTOR_eDP; 2511 intel_encoder->type = INTEL_OUTPUT_EDP; 2512 } else { 2513 type = DRM_MODE_CONNECTOR_DisplayPort; 2514 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2515 } 2516 2517 connector = &intel_connector->base; 2518 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2519 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2520 2521 connector->polled = DRM_CONNECTOR_POLL_HPD; 2522 2523 intel_encoder->cloneable = false; 2524 2525 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 2526 ironlake_panel_vdd_work); 2527 2528 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2529 2530 connector->interlace_allowed = true; 2531 connector->doublescan_allowed = 0; 2532 2533 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 2534 DRM_MODE_ENCODER_TMDS); 2535 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); 2536 2537 intel_connector_attach_encoder(intel_connector, intel_encoder); 2538 drm_sysfs_connector_add(connector); 2539 2540 intel_encoder->enable = intel_enable_dp; 2541 intel_encoder->pre_enable = intel_pre_enable_dp; 2542 intel_encoder->disable = intel_disable_dp; 2543 intel_encoder->post_disable = intel_post_disable_dp; 2544 
intel_encoder->get_hw_state = intel_dp_get_hw_state; 2545 intel_connector->get_hw_state = intel_connector_get_hw_state; 2546 2547 /* Set up the DDC bus. */ 2548 switch (port) { 2549 case PORT_A: 2550 name = "DPDDC-A"; 2551 break; 2552 case PORT_B: 2553 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; 2554 name = "DPDDC-B"; 2555 break; 2556 case PORT_C: 2557 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; 2558 name = "DPDDC-C"; 2559 break; 2560 case PORT_D: 2561 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; 2562 name = "DPDDC-D"; 2563 break; 2564 default: 2565 WARN(1, "Invalid port %c\n", port_name(port)); 2566 break; 2567 } 2568 2569 /* Cache some DPCD data in the eDP case */ 2570 if (is_edp(intel_dp)) { 2571 struct edp_power_seq cur, vbt; 2572 u32 pp_on, pp_off, pp_div; 2573 2574 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2575 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2576 pp_div = I915_READ(PCH_PP_DIVISOR); 2577 2578 if (!pp_on || !pp_off || !pp_div) { 2579 DRM_INFO("bad panel power sequencing delays, disabling panel\n"); 2580 intel_dp_encoder_destroy(&intel_dp->base.base); 2581 intel_dp_destroy(&intel_connector->base); 2582 return; 2583 } 2584 2585 /* Pull timing values out of registers */ 2586 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 2587 PANEL_POWER_UP_DELAY_SHIFT; 2588 2589 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2590 PANEL_LIGHT_ON_DELAY_SHIFT; 2591 2592 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2593 PANEL_LIGHT_OFF_DELAY_SHIFT; 2594 2595 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 2596 PANEL_POWER_DOWN_DELAY_SHIFT; 2597 2598 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 2599 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 2600 2601 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2602 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 2603 2604 vbt = dev_priv->edp.pps; 2605 2606 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2607 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, 
vbt.t11_t12); 2608 2609#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) 2610 2611 intel_dp->panel_power_up_delay = get_delay(t1_t3); 2612 intel_dp->backlight_on_delay = get_delay(t8); 2613 intel_dp->backlight_off_delay = get_delay(t9); 2614 intel_dp->panel_power_down_delay = get_delay(t10); 2615 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2616 2617 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2618 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2619 intel_dp->panel_power_cycle_delay); 2620 2621 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2622 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2623 } 2624 2625 intel_dp_i2c_init(intel_dp, intel_connector, name); 2626 2627 if (is_edp(intel_dp)) { 2628 bool ret; 2629 struct edid *edid; 2630 2631 ironlake_edp_panel_vdd_on(intel_dp); 2632 ret = intel_dp_get_dpcd(intel_dp); 2633 ironlake_edp_panel_vdd_off(intel_dp, false); 2634 2635 if (ret) { 2636 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2637 dev_priv->no_aux_handshake = 2638 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2639 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2640 } else { 2641 /* if this fails, presume the device is a ghost */ 2642 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2643 intel_dp_encoder_destroy(&intel_dp->base.base); 2644 intel_dp_destroy(&intel_connector->base); 2645 return; 2646 } 2647 2648 ironlake_edp_panel_vdd_on(intel_dp); 2649 edid = drm_get_edid(connector, &intel_dp->adapter); 2650 if (edid) { 2651 drm_mode_connector_update_edid_property(connector, 2652 edid); 2653 intel_dp->edid_mode_count = 2654 drm_add_edid_modes(connector, edid); 2655 drm_edid_to_eld(connector, edid); 2656 intel_dp->edid = edid; 2657 } 2658 ironlake_edp_panel_vdd_off(intel_dp, false); 2659 } 2660 2661 intel_encoder->hot_plug = intel_dp_hot_plug; 2662 2663 if (is_edp(intel_dp)) { 2664 dev_priv->int_edp_connector = connector; 2665 intel_panel_setup_backlight(dev); 2666 } 
2667 2668 intel_dp_add_properties(intel_dp, connector); 2669 2670 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 2671 * 0xd. Failure to do so will result in spurious interrupts being 2672 * generated on the port when a cable is not attached. 2673 */ 2674 if (IS_G4X(dev) && !IS_GM45(dev)) { 2675 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 2676 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2677 } 2678} 2679