/* intel_dp.c — revision 30add22d8459f8ac28d7ead366129224e0d17c43 */
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	return intel_dp->base.type == INTEL_OUTPUT_EDP;
}

/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	return intel_dp->is_pch_edp;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}

/* Convenience accessor: the drm_device that owns this DP output. */
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	return intel_dp->base.base.dev;
}

/* Convenience accessor: the intel_dp behind a connector's attached encoder. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

/**
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 * @encoder: DRM encoder
 *
 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
 * by intel_display.c.
 */
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (!encoder)
		return false;

	intel_dp = enc_to_intel_dp(encoder);

	return is_pch_edp(intel_dp);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);

/*
 * Report the negotiated lane count and the link clock (in kHz) of an eDP
 * encoder to callers in intel_display.c.
 */
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
		      int *lane_num, int *link_bw)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	*lane_num = intel_dp->lane_count;
	*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}

/*
 * Target pixel clock for an eDP output: the fixed panel mode's clock when
 * one exists, otherwise the requested mode's clock.
 */
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
		       struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	if (intel_connector->panel.fixed_mode)
		return intel_connector->panel.fixed_mode->clock;
	else
		return mode->clock;
}

/*
 * Highest link-rate code the sink advertises in its DPCD; anything we do
 * not recognise is clamped down to 1.62 GHz.
 */
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

/* Translate a DP link-rate code into a link clock in kHz. */
static int
intel_dp_link_clock(uint8_t link_bw)
{
	if (link_bw == DP_LINK_BW_2_7)
		return 270000;
	else
		return 162000;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* + 9 rounds the division up so we never under-report the need */
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* 8b/10b channel coding: only 8 of every 10 bits carry data */
	return (max_link_clock * max_lanes * 8) / 10;
}

/*
 * Check whether @mode fits on the link at 24bpp; if not, try 18bpp and,
 * when @adjust_mode is set, tag the mode so later stages force 6bpc
 * dithering.  Returns false only if even 18bpp does not fit.
 */
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)
{
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_rate, mode_rate;

	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;

		if (adjust_mode)
			mode->private_flags
				|= INTEL_MODE_DP_FORCE_6BPC;

		return true;
	}

	return true;
}

/* drm connector ->mode_valid: reject modes the panel or link cannot carry. */
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/*
 * Pack up to 4 bytes into one 32-bit AUX data register value,
 * most-significant byte first (the hardware's wire ordering).
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int	i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

/* Inverse of pack_aux: unpack up to 4 bytes from one AUX data register. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

/* Panel power status, as reported by the PCH power sequencer. */
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}

/* Is panel VDD currently being forced on via the power sequencer? */
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}

/*
 * Sanity check before AUX traffic to an eDP panel: warn loudly if neither
 * panel power nor forced VDD is up, since the transaction will then fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

/*
 * Perform one raw AUX channel transaction: load @send into the data
 * registers, kick the hardware, busy-wait for completion and unpack any
 * reply into @recv.  Returns the number of bytes received or a negative
 * errno (-EBUSY, -EIO, -ETIMEDOUT).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	/* On Haswell the AUX registers are no longer at output_reg + 0x10;
	 * pick them per-port instead. */
	if (IS_HASWELL(dev)) {
		switch (intel_dp->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_HASWELL(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec
	 * (we allow up to 5 attempts here) */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t	msg[20];	/* 4-byte header + up to 16 payload bytes */
	int msg_bytes;
	uint8_t	ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];	/* 1 reply-status byte + up to 19 data bytes */
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

/*
 * i2c-over-AUX transfer callback used by the i2c_algo_dp_aux algorithm:
 * translates a single-byte I2C read/write/address-only operation into an
 * AUX transaction, retrying up to 5 times on DEFER replies.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (Middle Of Transaction) is set for every op except STOP */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

/*
 * Register the i2c-over-AUX adapter for this DP port.  VDD is forced on
 * around the bus registration because it probes the sink over AUX.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

/*
 * drm encoder ->mode_fixup: pick the lowest link rate / lane count
 * combination that can carry the adjusted mode, stashing the choice in
 * intel_dp->link_bw / ->lane_count.  Returns false if no combination fits.
 */
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}

struct intel_dp_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

/*
 * Shift both terms of a ratio right until each fits in the hardware's
 * 24-bit M/N register fields.
 */
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

/* Compute the GMCH data and link M/N ratios for the given configuration. */
static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}

/*
 * Program the pipe/transcoder data and link M/N registers for a DP crtc.
 * The register bank differs per platform generation.
 */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

/*
 * Fill intel_dp->link_configuration with the DPCD link-configuration bytes
 * (link rate, lane count, enhanced framing, ANSI 8B/10B coding) that link
 * training will write to the sink.
 */
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

/*
 * drm encoder ->mode_set: assemble the value for the DP port register
 * (intel_dp->DP) from the negotiated link parameters and the platform's
 * register layout; the actual write happens later during enable.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

/* Mask/value pairs for polling the panel power sequencer state machine. */
#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/* Poll PCH_PP_STATUS until (status & mask) == value, up to 5 seconds. */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32	control = I915_READ(PCH_PP_CONTROL);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Force panel VDD on so AUX transactions work even while the panel is
 * otherwise powered down.  Balanced by ironlake_edp_panel_vdd_off().
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/* Drop forced VDD immediately, unless someone still wants it on. */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Deferred-work body for delayed VDD off; takes the mode_config lock. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Release a VDD reference taken by ironlake_edp_panel_vdd_on().  With
 * @sync the VDD is dropped immediately; otherwise it is scheduled so
 * that a burst of AUX activity doesn't power-cycle VDD repeatedly.
 */
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/* Turn the eDP panel power on via the PCH power sequencer. */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

/* Turn the eDP panel power off; also drops forced VDD (see comment). */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

/* Enable the eDP backlight, after the panel-sync settle delay. */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dp->base.base.crtc)->pipe;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_panel_enable_backlight(dev, pipe);
}

/* Disable the eDP backlight, then honour the backlight-off delay. */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

/* Enable the CPU eDP PLL (DP_A); the pipe must still be disabled. */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much.
*/ 1233 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 1234 intel_dp->DP |= DP_PLL_ENABLE; 1235 I915_WRITE(DP_A, intel_dp->DP); 1236 POSTING_READ(DP_A); 1237 udelay(200); 1238} 1239 1240static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 1241{ 1242 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1243 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1244 struct drm_i915_private *dev_priv = dev->dev_private; 1245 u32 dpa_ctl; 1246 1247 assert_pipe_disabled(dev_priv, 1248 to_intel_crtc(crtc)->pipe); 1249 1250 dpa_ctl = I915_READ(DP_A); 1251 WARN((dpa_ctl & DP_PLL_ENABLE) == 0, 1252 "dp pll off, should be on\n"); 1253 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 1254 1255 /* We can't rely on the value tracked for the DP register in 1256 * intel_dp->DP because link_down must not change that (otherwise link 1257 * re-training will fail. */ 1258 dpa_ctl &= ~DP_PLL_ENABLE; 1259 I915_WRITE(DP_A, dpa_ctl); 1260 POSTING_READ(DP_A); 1261 udelay(200); 1262} 1263 1264/* If the sink supports it, try to set the power state appropriately */ 1265void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 1266{ 1267 int ret, i; 1268 1269 /* Should have a valid DPCD by this point */ 1270 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1271 return; 1272 1273 if (mode != DRM_MODE_DPMS_ON) { 1274 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, 1275 DP_SET_POWER_D3); 1276 if (ret != 1) 1277 DRM_DEBUG_DRIVER("failed to write sink power state\n"); 1278 } else { 1279 /* 1280 * When turning on, we need to retry for 1ms to give the sink 1281 * time to wake up. 
1282 */ 1283 for (i = 0; i < 3; i++) { 1284 ret = intel_dp_aux_native_write_1(intel_dp, 1285 DP_SET_POWER, 1286 DP_SET_POWER_D0); 1287 if (ret == 1) 1288 break; 1289 msleep(1); 1290 } 1291 } 1292} 1293 1294static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 1295 enum pipe *pipe) 1296{ 1297 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1298 struct drm_device *dev = encoder->base.dev; 1299 struct drm_i915_private *dev_priv = dev->dev_private; 1300 u32 tmp = I915_READ(intel_dp->output_reg); 1301 1302 if (!(tmp & DP_PORT_EN)) 1303 return false; 1304 1305 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { 1306 *pipe = PORT_TO_PIPE_CPT(tmp); 1307 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 1308 *pipe = PORT_TO_PIPE(tmp); 1309 } else { 1310 u32 trans_sel; 1311 u32 trans_dp; 1312 int i; 1313 1314 switch (intel_dp->output_reg) { 1315 case PCH_DP_B: 1316 trans_sel = TRANS_DP_PORT_SEL_B; 1317 break; 1318 case PCH_DP_C: 1319 trans_sel = TRANS_DP_PORT_SEL_C; 1320 break; 1321 case PCH_DP_D: 1322 trans_sel = TRANS_DP_PORT_SEL_D; 1323 break; 1324 default: 1325 return true; 1326 } 1327 1328 for_each_pipe(i) { 1329 trans_dp = I915_READ(TRANS_DP_CTL(i)); 1330 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { 1331 *pipe = i; 1332 return true; 1333 } 1334 } 1335 1336 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", 1337 intel_dp->output_reg); 1338 } 1339 1340 return true; 1341} 1342 1343static void intel_disable_dp(struct intel_encoder *encoder) 1344{ 1345 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1346 1347 /* Make sure the panel is off before trying to change the mode. But also 1348 * ensure that we have vdd while we switch off the panel. */ 1349 ironlake_edp_panel_vdd_on(intel_dp); 1350 ironlake_edp_backlight_off(intel_dp); 1351 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1352 ironlake_edp_panel_off(intel_dp); 1353 1354 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. 
*/ 1355 if (!is_cpu_edp(intel_dp)) 1356 intel_dp_link_down(intel_dp); 1357} 1358 1359static void intel_post_disable_dp(struct intel_encoder *encoder) 1360{ 1361 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1362 1363 if (is_cpu_edp(intel_dp)) { 1364 intel_dp_link_down(intel_dp); 1365 ironlake_edp_pll_off(intel_dp); 1366 } 1367} 1368 1369static void intel_enable_dp(struct intel_encoder *encoder) 1370{ 1371 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1372 struct drm_device *dev = encoder->base.dev; 1373 struct drm_i915_private *dev_priv = dev->dev_private; 1374 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1375 1376 if (WARN_ON(dp_reg & DP_PORT_EN)) 1377 return; 1378 1379 ironlake_edp_panel_vdd_on(intel_dp); 1380 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1381 intel_dp_start_link_train(intel_dp); 1382 ironlake_edp_panel_on(intel_dp); 1383 ironlake_edp_panel_vdd_off(intel_dp, true); 1384 intel_dp_complete_link_train(intel_dp); 1385 ironlake_edp_backlight_on(intel_dp); 1386} 1387 1388static void intel_pre_enable_dp(struct intel_encoder *encoder) 1389{ 1390 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1391 1392 if (is_cpu_edp(intel_dp)) 1393 ironlake_edp_pll_on(intel_dp); 1394} 1395 1396/* 1397 * Native read with retry for link status and receiver capability reads for 1398 * cases where the sink may still be asleep. 1399 */ 1400static bool 1401intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1402 uint8_t *recv, int recv_bytes) 1403{ 1404 int ret, i; 1405 1406 /* 1407 * Sinks are *supposed* to come up within 1ms from an off state, 1408 * but we're also supposed to retry 3 times per the spec. 
1409 */ 1410 for (i = 0; i < 3; i++) { 1411 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1412 recv_bytes); 1413 if (ret == recv_bytes) 1414 return true; 1415 msleep(1); 1416 } 1417 1418 return false; 1419} 1420 1421/* 1422 * Fetch AUX CH registers 0x202 - 0x207 which contain 1423 * link status information 1424 */ 1425static bool 1426intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1427{ 1428 return intel_dp_aux_native_read_retry(intel_dp, 1429 DP_LANE0_1_STATUS, 1430 link_status, 1431 DP_LINK_STATUS_SIZE); 1432} 1433 1434#if 0 1435static char *voltage_names[] = { 1436 "0.4V", "0.6V", "0.8V", "1.2V" 1437}; 1438static char *pre_emph_names[] = { 1439 "0dB", "3.5dB", "6dB", "9.5dB" 1440}; 1441static char *link_train_names[] = { 1442 "pattern 1", "pattern 2", "idle", "off" 1443}; 1444#endif 1445 1446/* 1447 * These are source-specific values; current Intel hardware supports 1448 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1449 */ 1450 1451static uint8_t 1452intel_dp_voltage_max(struct intel_dp *intel_dp) 1453{ 1454 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1455 1456 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1457 return DP_TRAIN_VOLTAGE_SWING_800; 1458 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1459 return DP_TRAIN_VOLTAGE_SWING_1200; 1460 else 1461 return DP_TRAIN_VOLTAGE_SWING_800; 1462} 1463 1464static uint8_t 1465intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1466{ 1467 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1468 1469 if (IS_HASWELL(dev)) { 1470 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1471 case DP_TRAIN_VOLTAGE_SWING_400: 1472 return DP_TRAIN_PRE_EMPHASIS_9_5; 1473 case DP_TRAIN_VOLTAGE_SWING_600: 1474 return DP_TRAIN_PRE_EMPHASIS_6; 1475 case DP_TRAIN_VOLTAGE_SWING_800: 1476 return DP_TRAIN_PRE_EMPHASIS_3_5; 1477 case DP_TRAIN_VOLTAGE_SWING_1200: 1478 default: 1479 return DP_TRAIN_PRE_EMPHASIS_0; 1480 } 1481 } else if 
(IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1482 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1483 case DP_TRAIN_VOLTAGE_SWING_400: 1484 return DP_TRAIN_PRE_EMPHASIS_6; 1485 case DP_TRAIN_VOLTAGE_SWING_600: 1486 case DP_TRAIN_VOLTAGE_SWING_800: 1487 return DP_TRAIN_PRE_EMPHASIS_3_5; 1488 default: 1489 return DP_TRAIN_PRE_EMPHASIS_0; 1490 } 1491 } else { 1492 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1493 case DP_TRAIN_VOLTAGE_SWING_400: 1494 return DP_TRAIN_PRE_EMPHASIS_6; 1495 case DP_TRAIN_VOLTAGE_SWING_600: 1496 return DP_TRAIN_PRE_EMPHASIS_6; 1497 case DP_TRAIN_VOLTAGE_SWING_800: 1498 return DP_TRAIN_PRE_EMPHASIS_3_5; 1499 case DP_TRAIN_VOLTAGE_SWING_1200: 1500 default: 1501 return DP_TRAIN_PRE_EMPHASIS_0; 1502 } 1503 } 1504} 1505 1506static void 1507intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1508{ 1509 uint8_t v = 0; 1510 uint8_t p = 0; 1511 int lane; 1512 uint8_t voltage_max; 1513 uint8_t preemph_max; 1514 1515 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1516 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); 1517 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); 1518 1519 if (this_v > v) 1520 v = this_v; 1521 if (this_p > p) 1522 p = this_p; 1523 } 1524 1525 voltage_max = intel_dp_voltage_max(intel_dp); 1526 if (v >= voltage_max) 1527 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 1528 1529 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 1530 if (p >= preemph_max) 1531 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1532 1533 for (lane = 0; lane < 4; lane++) 1534 intel_dp->train_set[lane] = v | p; 1535} 1536 1537static uint32_t 1538intel_dp_signal_levels(uint8_t train_set) 1539{ 1540 uint32_t signal_levels = 0; 1541 1542 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1543 case DP_TRAIN_VOLTAGE_SWING_400: 1544 default: 1545 signal_levels |= DP_VOLTAGE_0_4; 1546 break; 1547 case 
DP_TRAIN_VOLTAGE_SWING_600: 1548 signal_levels |= DP_VOLTAGE_0_6; 1549 break; 1550 case DP_TRAIN_VOLTAGE_SWING_800: 1551 signal_levels |= DP_VOLTAGE_0_8; 1552 break; 1553 case DP_TRAIN_VOLTAGE_SWING_1200: 1554 signal_levels |= DP_VOLTAGE_1_2; 1555 break; 1556 } 1557 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1558 case DP_TRAIN_PRE_EMPHASIS_0: 1559 default: 1560 signal_levels |= DP_PRE_EMPHASIS_0; 1561 break; 1562 case DP_TRAIN_PRE_EMPHASIS_3_5: 1563 signal_levels |= DP_PRE_EMPHASIS_3_5; 1564 break; 1565 case DP_TRAIN_PRE_EMPHASIS_6: 1566 signal_levels |= DP_PRE_EMPHASIS_6; 1567 break; 1568 case DP_TRAIN_PRE_EMPHASIS_9_5: 1569 signal_levels |= DP_PRE_EMPHASIS_9_5; 1570 break; 1571 } 1572 return signal_levels; 1573} 1574 1575/* Gen6's DP voltage swing and pre-emphasis control */ 1576static uint32_t 1577intel_gen6_edp_signal_levels(uint8_t train_set) 1578{ 1579 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1580 DP_TRAIN_PRE_EMPHASIS_MASK); 1581 switch (signal_levels) { 1582 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1583 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1584 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1585 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1586 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 1587 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1588 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1589 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 1590 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1591 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1592 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 1593 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1594 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 1595 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 1596 default: 1597 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1598 "0x%x\n", signal_levels); 1599 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1600 } 
1601} 1602 1603/* Gen7's DP voltage swing and pre-emphasis control */ 1604static uint32_t 1605intel_gen7_edp_signal_levels(uint8_t train_set) 1606{ 1607 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1608 DP_TRAIN_PRE_EMPHASIS_MASK); 1609 switch (signal_levels) { 1610 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1611 return EDP_LINK_TRAIN_400MV_0DB_IVB; 1612 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1613 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 1614 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1615 return EDP_LINK_TRAIN_400MV_6DB_IVB; 1616 1617 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1618 return EDP_LINK_TRAIN_600MV_0DB_IVB; 1619 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1620 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 1621 1622 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1623 return EDP_LINK_TRAIN_800MV_0DB_IVB; 1624 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1625 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 1626 1627 default: 1628 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1629 "0x%x\n", signal_levels); 1630 return EDP_LINK_TRAIN_500MV_0DB_IVB; 1631 } 1632} 1633 1634/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1635static uint32_t 1636intel_dp_signal_levels_hsw(uint8_t train_set) 1637{ 1638 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1639 DP_TRAIN_PRE_EMPHASIS_MASK); 1640 switch (signal_levels) { 1641 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1642 return DDI_BUF_EMP_400MV_0DB_HSW; 1643 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1644 return DDI_BUF_EMP_400MV_3_5DB_HSW; 1645 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1646 return DDI_BUF_EMP_400MV_6DB_HSW; 1647 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: 1648 return DDI_BUF_EMP_400MV_9_5DB_HSW; 1649 1650 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1651 return 
DDI_BUF_EMP_600MV_0DB_HSW; 1652 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1653 return DDI_BUF_EMP_600MV_3_5DB_HSW; 1654 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1655 return DDI_BUF_EMP_600MV_6DB_HSW; 1656 1657 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1658 return DDI_BUF_EMP_800MV_0DB_HSW; 1659 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1660 return DDI_BUF_EMP_800MV_3_5DB_HSW; 1661 default: 1662 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1663 "0x%x\n", signal_levels); 1664 return DDI_BUF_EMP_400MV_0DB_HSW; 1665 } 1666} 1667 1668static bool 1669intel_dp_set_link_train(struct intel_dp *intel_dp, 1670 uint32_t dp_reg_value, 1671 uint8_t dp_train_pat) 1672{ 1673 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1674 struct drm_i915_private *dev_priv = dev->dev_private; 1675 int ret; 1676 uint32_t temp; 1677 1678 if (IS_HASWELL(dev)) { 1679 temp = I915_READ(DP_TP_CTL(intel_dp->port)); 1680 1681 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 1682 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 1683 else 1684 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 1685 1686 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1687 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1688 case DP_TRAINING_PATTERN_DISABLE: 1689 temp |= DP_TP_CTL_LINK_TRAIN_IDLE; 1690 I915_WRITE(DP_TP_CTL(intel_dp->port), temp); 1691 1692 if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) & 1693 DP_TP_STATUS_IDLE_DONE), 1)) 1694 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 1695 1696 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1697 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1698 1699 break; 1700 case DP_TRAINING_PATTERN_1: 1701 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 1702 break; 1703 case DP_TRAINING_PATTERN_2: 1704 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 1705 break; 1706 case DP_TRAINING_PATTERN_3: 1707 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 1708 break; 1709 } 1710 I915_WRITE(DP_TP_CTL(intel_dp->port), temp); 1711 1712 } else if (HAS_PCH_CPT(dev) && 1713 
(IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1714 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 1715 1716 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1717 case DP_TRAINING_PATTERN_DISABLE: 1718 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 1719 break; 1720 case DP_TRAINING_PATTERN_1: 1721 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 1722 break; 1723 case DP_TRAINING_PATTERN_2: 1724 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1725 break; 1726 case DP_TRAINING_PATTERN_3: 1727 DRM_ERROR("DP training pattern 3 not supported\n"); 1728 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1729 break; 1730 } 1731 1732 } else { 1733 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 1734 1735 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1736 case DP_TRAINING_PATTERN_DISABLE: 1737 dp_reg_value |= DP_LINK_TRAIN_OFF; 1738 break; 1739 case DP_TRAINING_PATTERN_1: 1740 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 1741 break; 1742 case DP_TRAINING_PATTERN_2: 1743 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1744 break; 1745 case DP_TRAINING_PATTERN_3: 1746 DRM_ERROR("DP training pattern 3 not supported\n"); 1747 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1748 break; 1749 } 1750 } 1751 1752 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1753 POSTING_READ(intel_dp->output_reg); 1754 1755 intel_dp_aux_native_write_1(intel_dp, 1756 DP_TRAINING_PATTERN_SET, 1757 dp_train_pat); 1758 1759 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != 1760 DP_TRAINING_PATTERN_DISABLE) { 1761 ret = intel_dp_aux_native_write(intel_dp, 1762 DP_TRAINING_LANE0_SET, 1763 intel_dp->train_set, 1764 intel_dp->lane_count); 1765 if (ret != intel_dp->lane_count) 1766 return false; 1767 } 1768 1769 return true; 1770} 1771 1772/* Enable corresponding port and start training pattern 1 */ 1773void 1774intel_dp_start_link_train(struct intel_dp *intel_dp) 1775{ 1776 struct drm_encoder *encoder = &intel_dp->base.base; 1777 struct drm_device *dev = encoder->dev; 1778 int i; 1779 uint8_t voltage; 1780 bool clock_recovery = false; 1781 int voltage_tries, loop_tries; 1782 
uint32_t DP = intel_dp->DP; 1783 1784 if (IS_HASWELL(dev)) 1785 intel_ddi_prepare_link_retrain(encoder); 1786 1787 /* Write the link configuration data */ 1788 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1789 intel_dp->link_configuration, 1790 DP_LINK_CONFIGURATION_SIZE); 1791 1792 DP |= DP_PORT_EN; 1793 1794 memset(intel_dp->train_set, 0, 4); 1795 voltage = 0xff; 1796 voltage_tries = 0; 1797 loop_tries = 0; 1798 clock_recovery = false; 1799 for (;;) { 1800 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1801 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1802 uint32_t signal_levels; 1803 1804 if (IS_HASWELL(dev)) { 1805 signal_levels = intel_dp_signal_levels_hsw( 1806 intel_dp->train_set[0]); 1807 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1808 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1809 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1810 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1811 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1812 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1813 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1814 } else { 1815 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1816 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1817 } 1818 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", 1819 signal_levels); 1820 1821 /* Set training pattern 1 */ 1822 if (!intel_dp_set_link_train(intel_dp, DP, 1823 DP_TRAINING_PATTERN_1 | 1824 DP_LINK_SCRAMBLING_DISABLE)) 1825 break; 1826 1827 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 1828 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1829 DRM_ERROR("failed to get link status\n"); 1830 break; 1831 } 1832 1833 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1834 DRM_DEBUG_KMS("clock recovery OK\n"); 1835 clock_recovery = true; 1836 break; 1837 } 1838 1839 /* Check to see 
if we've tried the max voltage */ 1840 for (i = 0; i < intel_dp->lane_count; i++) 1841 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1842 break; 1843 if (i == intel_dp->lane_count && voltage_tries == 5) { 1844 if (++loop_tries == 5) { 1845 DRM_DEBUG_KMS("too many full retries, give up\n"); 1846 break; 1847 } 1848 memset(intel_dp->train_set, 0, 4); 1849 voltage_tries = 0; 1850 continue; 1851 } 1852 1853 /* Check to see if we've tried the same voltage 5 times */ 1854 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { 1855 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1856 voltage_tries = 0; 1857 } else 1858 ++voltage_tries; 1859 1860 /* Compute new intel_dp->train_set as requested by target */ 1861 intel_get_adjust_train(intel_dp, link_status); 1862 } 1863 1864 intel_dp->DP = DP; 1865} 1866 1867void 1868intel_dp_complete_link_train(struct intel_dp *intel_dp) 1869{ 1870 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1871 bool channel_eq = false; 1872 int tries, cr_tries; 1873 uint32_t DP = intel_dp->DP; 1874 1875 /* channel equalization */ 1876 tries = 0; 1877 cr_tries = 0; 1878 channel_eq = false; 1879 for (;;) { 1880 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1881 uint32_t signal_levels; 1882 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1883 1884 if (cr_tries > 5) { 1885 DRM_ERROR("failed to train DP, aborting\n"); 1886 intel_dp_link_down(intel_dp); 1887 break; 1888 } 1889 1890 if (IS_HASWELL(dev)) { 1891 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); 1892 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1893 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1894 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1895 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1896 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1897 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1898 
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1899 } else { 1900 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1901 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1902 } 1903 1904 /* channel eq pattern */ 1905 if (!intel_dp_set_link_train(intel_dp, DP, 1906 DP_TRAINING_PATTERN_2 | 1907 DP_LINK_SCRAMBLING_DISABLE)) 1908 break; 1909 1910 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 1911 if (!intel_dp_get_link_status(intel_dp, link_status)) 1912 break; 1913 1914 /* Make sure clock is still ok */ 1915 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1916 intel_dp_start_link_train(intel_dp); 1917 cr_tries++; 1918 continue; 1919 } 1920 1921 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 1922 channel_eq = true; 1923 break; 1924 } 1925 1926 /* Try 5 times, then try clock recovery if that fails */ 1927 if (tries > 5) { 1928 intel_dp_link_down(intel_dp); 1929 intel_dp_start_link_train(intel_dp); 1930 tries = 0; 1931 cr_tries++; 1932 continue; 1933 } 1934 1935 /* Compute new intel_dp->train_set as requested by target */ 1936 intel_get_adjust_train(intel_dp, link_status); 1937 ++tries; 1938 } 1939 1940 if (channel_eq) 1941 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); 1942 1943 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1944} 1945 1946static void 1947intel_dp_link_down(struct intel_dp *intel_dp) 1948{ 1949 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1950 struct drm_i915_private *dev_priv = dev->dev_private; 1951 uint32_t DP = intel_dp->DP; 1952 1953 /* 1954 * DDI code has a strict mode set sequence and we should try to respect 1955 * it, otherwise we might hang the machine in many different ways. So we 1956 * really should be disabling the port only on a complete crtc_disable 1957 * sequence. 
This function is just called under two conditions on DDI 1958 * code: 1959 * - Link train failed while doing crtc_enable, and on this case we 1960 * really should respect the mode set sequence and wait for a 1961 * crtc_disable. 1962 * - Someone turned the monitor off and intel_dp_check_link_status 1963 * called us. We don't need to disable the whole port on this case, so 1964 * when someone turns the monitor on again, 1965 * intel_ddi_prepare_link_retrain will take care of redoing the link 1966 * train. 1967 */ 1968 if (IS_HASWELL(dev)) 1969 return; 1970 1971 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 1972 return; 1973 1974 DRM_DEBUG_KMS("\n"); 1975 1976 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1977 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1978 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1979 } else { 1980 DP &= ~DP_LINK_TRAIN_MASK; 1981 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1982 } 1983 POSTING_READ(intel_dp->output_reg); 1984 1985 msleep(17); 1986 1987 if (HAS_PCH_IBX(dev) && 1988 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1989 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1990 1991 /* Hardware workaround: leaving our transcoder select 1992 * set to transcoder B while it's off will prevent the 1993 * corresponding HDMI output on transcoder A. 1994 * 1995 * Combine this with another hardware workaround: 1996 * transcoder select bit can only be cleared while the 1997 * port is enabled. 1998 */ 1999 DP &= ~DP_PIPEB_SELECT; 2000 I915_WRITE(intel_dp->output_reg, DP); 2001 2002 /* Changes to enable or select take place the vblank 2003 * after being written. 2004 */ 2005 if (crtc == NULL) { 2006 /* We can arrive here never having been attached 2007 * to a CRTC, for instance, due to inheriting 2008 * random state from the BIOS. 2009 * 2010 * If the pipe is not running, play safe and 2011 * wait for the clocks to stabilise before 2012 * continuing. 
2013 */ 2014 POSTING_READ(intel_dp->output_reg); 2015 msleep(50); 2016 } else 2017 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 2018 } 2019 2020 DP &= ~DP_AUDIO_OUTPUT_ENABLE; 2021 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 2022 POSTING_READ(intel_dp->output_reg); 2023 msleep(intel_dp->panel_power_down_delay); 2024} 2025 2026static bool 2027intel_dp_get_dpcd(struct intel_dp *intel_dp) 2028{ 2029 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2030 sizeof(intel_dp->dpcd)) == 0) 2031 return false; /* aux transfer failed */ 2032 2033 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2034 return false; /* DPCD not present */ 2035 2036 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2037 DP_DWN_STRM_PORT_PRESENT)) 2038 return true; /* native DP sink */ 2039 2040 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 2041 return true; /* no per-port downstream info */ 2042 2043 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, 2044 intel_dp->downstream_ports, 2045 DP_MAX_DOWNSTREAM_PORTS) == 0) 2046 return false; /* downstream port status fetch failed */ 2047 2048 return true; 2049} 2050 2051static void 2052intel_dp_probe_oui(struct intel_dp *intel_dp) 2053{ 2054 u8 buf[3]; 2055 2056 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 2057 return; 2058 2059 ironlake_edp_panel_vdd_on(intel_dp); 2060 2061 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 2062 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 2063 buf[0], buf[1], buf[2]); 2064 2065 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) 2066 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 2067 buf[0], buf[1], buf[2]); 2068 2069 ironlake_edp_panel_vdd_off(intel_dp, false); 2070} 2071 2072static bool 2073intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) 2074{ 2075 int ret; 2076 2077 ret = intel_dp_aux_native_read_retry(intel_dp, 2078 DP_DEVICE_SERVICE_IRQ_VECTOR, 2079 sink_irq_vector, 1); 2080 if (!ret) 2081 return 
false; 2082 2083 return true; 2084} 2085 2086static void 2087intel_dp_handle_test_request(struct intel_dp *intel_dp) 2088{ 2089 /* NAK by default */ 2090 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); 2091} 2092 2093/* 2094 * According to DP spec 2095 * 5.1.2: 2096 * 1. Read DPCD 2097 * 2. Configure link according to Receiver Capabilities 2098 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 2099 * 4. Check link status on receipt of hot-plug interrupt 2100 */ 2101 2102static void 2103intel_dp_check_link_status(struct intel_dp *intel_dp) 2104{ 2105 u8 sink_irq_vector; 2106 u8 link_status[DP_LINK_STATUS_SIZE]; 2107 2108 if (!intel_dp->base.connectors_active) 2109 return; 2110 2111 if (WARN_ON(!intel_dp->base.base.crtc)) 2112 return; 2113 2114 /* Try to read receiver status if the link appears to be up */ 2115 if (!intel_dp_get_link_status(intel_dp, link_status)) { 2116 intel_dp_link_down(intel_dp); 2117 return; 2118 } 2119 2120 /* Now read the DPCD to see if it's actually running */ 2121 if (!intel_dp_get_dpcd(intel_dp)) { 2122 intel_dp_link_down(intel_dp); 2123 return; 2124 } 2125 2126 /* Try to read the source of the interrupt */ 2127 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 2128 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { 2129 /* Clear interrupt source */ 2130 intel_dp_aux_native_write_1(intel_dp, 2131 DP_DEVICE_SERVICE_IRQ_VECTOR, 2132 sink_irq_vector); 2133 2134 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) 2135 intel_dp_handle_test_request(intel_dp); 2136 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) 2137 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 2138 } 2139 2140 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 2141 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 2142 drm_get_encoder_name(&intel_dp->base.base)); 2143 intel_dp_start_link_train(intel_dp); 2144 intel_dp_complete_link_train(intel_dp); 2145 } 2146} 2147 2148/* XXX this is probably wrong for multiple downstream ports 
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	return intel_dp_detect_dpcd(intel_dp);
}

static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit;

	/* Map the port to its live-status bit in PORT_HOTPLUG_STAT. */
	switch (intel_dp->output_reg) {
	case DP_B:
		bit = DPB_HOTPLUG_LIVE_STATUS;
		break;
	case DP_C:
		bit = DPC_HOTPLUG_LIVE_STATUS;
		break;
	case DP_D:
		bit = DPD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	/* Hotplug says live; confirm via DPCD. */
	return intel_dp_detect_dpcd(intel_dp);
}

/*
 * Return the EDID for this connector: a freshly kmalloc'ed copy of the
 * cached one if present (caller must kfree), otherwise probe DDC.
 * Returns NULL if the cached EDID is an ERR_PTR sentinel or on OOM.
 */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		struct edid *edid;
		int size;

		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
		edid = kmalloc(size, GFP_KERNEL);
		if (!edid)
			return NULL;

		memcpy(edid, intel_connector->edid, size);
		return edid;
	}

	return drm_get_edid(connector, adapter);
}

/* Add the modes from the (cached or freshly probed) EDID to the connector. */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return 0;

		return
		       intel_connector_update_modes(connector,
						    intel_connector->edid);
	}

	return intel_ddc_get_modes(connector, adapter);
}


/**
 * Probe for a connected (e)DP sink and determine whether it has audio.
 *
 * \return connector_status_connected if a sink is present.
 * \return connector_status_disconnected otherwise.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;
	/* 3 chars per byte: two hex digits plus separator/NUL */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Honour the force-audio property; otherwise ask the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	return connector_status_connected;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret)
		return ret;

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

/* Re-read the EDID and report whether the attached monitor carries audio. */
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

/*
 * Handle changes to the force-audio, broadcast-RGB and (eDP only) scaling
 * connector properties.  Any accepted change triggers a full modeset on
 * the attached crtc so the new setting takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_dp->color_range)
			return 0;

		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-run the modeset so the updated property is applied. */
	if (intel_dp->base.base.crtc) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
		intel_set_mode(crtc, &crtc->mode,
			       crtc->x, crtc->y, crtc->fb);
	}

	return 0;
}

static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* The cached EDID may be an ERR_PTR sentinel; only free real ones. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	if (is_edp(intel_dp)) {
		intel_panel_destroy_backlight(dev);
		intel_panel_fini(&intel_connector->panel);
	}

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* Flush any pending delayed vdd-off work before freeing. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		ironlake_panel_vdd_off_sync(intel_dp);
	}
	kfree(intel_dp);
}

static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
	.disable = intel_encoder_noop,
};

/* Haswell routes mode_set through the DDI code instead. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs_hsw = {
.mode_fixup = intel_dp_mode_fixup, 2476 .mode_set = intel_ddi_mode_set, 2477 .disable = intel_encoder_noop, 2478}; 2479 2480static const struct drm_connector_funcs intel_dp_connector_funcs = { 2481 .dpms = intel_connector_dpms, 2482 .detect = intel_dp_detect, 2483 .fill_modes = drm_helper_probe_single_connector_modes, 2484 .set_property = intel_dp_set_property, 2485 .destroy = intel_dp_destroy, 2486}; 2487 2488static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 2489 .get_modes = intel_dp_get_modes, 2490 .mode_valid = intel_dp_mode_valid, 2491 .best_encoder = intel_best_encoder, 2492}; 2493 2494static const struct drm_encoder_funcs intel_dp_enc_funcs = { 2495 .destroy = intel_dp_encoder_destroy, 2496}; 2497 2498static void 2499intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2500{ 2501 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2502 2503 intel_dp_check_link_status(intel_dp); 2504} 2505 2506/* Return which DP Port should be selected for Transcoder DP control */ 2507int 2508intel_trans_dp_port_sel(struct drm_crtc *crtc) 2509{ 2510 struct drm_device *dev = crtc->dev; 2511 struct intel_encoder *intel_encoder; 2512 struct intel_dp *intel_dp; 2513 2514 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 2515 intel_dp = enc_to_intel_dp(&intel_encoder->base); 2516 2517 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2518 intel_encoder->type == INTEL_OUTPUT_EDP) 2519 return intel_dp->output_reg; 2520 } 2521 2522 return -1; 2523} 2524 2525/* check the VBT to see whether the eDP is on DP-D port */ 2526bool intel_dpd_is_edp(struct drm_device *dev) 2527{ 2528 struct drm_i915_private *dev_priv = dev->dev_private; 2529 struct child_device_config *p_child; 2530 int i; 2531 2532 if (!dev_priv->child_dev_num) 2533 return false; 2534 2535 for (i = 0; i < dev_priv->child_dev_num; i++) { 2536 p_child = dev_priv->child_dev + i; 2537 2538 if (p_child->dvo_port == PORT_IDPD && 2539 p_child->device_type == DEVICE_TYPE_eDP) 
			return true;
	}
	return false;
}

/* Attach the standard DP connector properties (plus scaling for eDP). */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_connector_attach_property(
			connector,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}

/*
 * Work out the eDP panel power sequencing delays (max of current register
 * values, VBT, and spec limits), cache them in intel_dp, and program the
 * PCH panel power sequencer registers accordingly.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(dev_priv);
	I915_WRITE(PCH_PP_CONTROL, pp);

	pp_on = I915_READ(PCH_PP_ON_DELAYS);
	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
	pp_div = I915_READ(PCH_PP_DIVISOR);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* hw stores power-cycle delay in 100ms units; convert to 100us */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Cached delays are in ms (hw units are 100us, hence /10). */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	/* And finally store the new values in the power sequencer. */
	pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula.
	 */
	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
			<< PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (is_cpu_edp(intel_dp))
			pp_on |= PANEL_POWER_PORT_DP_A;
		else
			pp_on |= PANEL_POWER_PORT_DP_D;
	}

	I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
	I915_WRITE(PCH_PP_DIVISOR, pp_div);


	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(PCH_PP_ON_DELAYS),
		      I915_READ(PCH_PP_OFF_DELAYS),
		      I915_READ(PCH_PP_DIVISOR));
}

/*
 * Register a DP (or eDP) output: allocate encoder + connector, pick the
 * connector type, wire up the hw-specific enable/disable hooks, set up the
 * DDC/aux channel, and for eDP also the panel power sequencer, cached
 * DPCD/EDID and the fixed panel mode.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_dp *intel_dp;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	struct drm_display_mode *fixed_mode = NULL;
	const char *name = NULL;
	int type;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
	if (!intel_dp)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->port = port;
	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}
	intel_encoder = &intel_dp->base;
	intel_dp->attached_connector = intel_connector;

	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && output_reg == DP_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (output_reg == DP_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	}

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_encoder->cloneable = false;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	/* Haswell uses the DDI mode_set path; see intel_dp_helper_funcs_hsw. */
	if (IS_HASWELL(dev))
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs_hsw);
	else
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (IS_HASWELL(dev)) {
		intel_encoder->enable = intel_enable_ddi;
		intel_encoder->pre_enable = intel_ddi_pre_enable;
		intel_encoder->disable = intel_disable_ddi;
		intel_encoder->post_disable = intel_ddi_post_disable;
		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
	} else {
		intel_encoder->enable = intel_enable_dp;
		intel_encoder->pre_enable = intel_pre_enable_dp;
		intel_encoder->disable = intel_disable_dp;
		intel_encoder->post_disable = intel_post_disable_dp;
		intel_encoder->get_hw_state = intel_dp_get_hw_state;
	}
	intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	/* Power sequencer must be programmed before any aux transfers. */
	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp);

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache DPCD and EDID for edp. */
	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_encoder->base);
			intel_dp_destroy(connector);
			return;
		}

		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				/* Cache an ERR_PTR so we don't re-probe. */
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}