intel_dp.c revision fa90ecefdc656b1e25af18251707907dbc1e7609
1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/i2c.h> 29#include <linux/slab.h> 30#include <linux/export.h> 31#include <drm/drmP.h> 32#include <drm/drm_crtc.h> 33#include <drm/drm_crtc_helper.h> 34#include <drm/drm_edid.h> 35#include "intel_drv.h" 36#include <drm/i915_drm.h> 37#include "i915_drv.h" 38 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40 41/** 42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 43 * @intel_dp: DP struct 44 * 45 * If a CPU or PCH DP output is attached to an eDP panel, this function 46 * will return true, and false otherwise. 47 */ 48static bool is_edp(struct intel_dp *intel_dp) 49{ 50 return intel_dp->base.type == INTEL_OUTPUT_EDP; 51} 52 53/** 54 * is_pch_edp - is the port on the PCH and attached to an eDP panel? 55 * @intel_dp: DP struct 56 * 57 * Returns true if the given DP struct corresponds to a PCH DP port attached 58 * to an eDP panel, false otherwise. Helpful for determining whether we 59 * may need FDI resources for a given DP output or not. 60 */ 61static bool is_pch_edp(struct intel_dp *intel_dp) 62{ 63 return intel_dp->is_pch_edp; 64} 65 66/** 67 * is_cpu_edp - is the port on the CPU and attached to an eDP panel? 68 * @intel_dp: DP struct 69 * 70 * Returns true if the given DP struct corresponds to a CPU eDP port. 71 */ 72static bool is_cpu_edp(struct intel_dp *intel_dp) 73{ 74 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 75} 76 77static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 78{ 79 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 80} 81 82/** 83 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? 84 * @encoder: DRM encoder 85 * 86 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed 87 * by intel_display.c. 88 */ 89bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) 90{ 91 struct intel_dp *intel_dp; 92 93 if (!encoder) 94 return false; 95 96 intel_dp = enc_to_intel_dp(encoder); 97 98 return is_pch_edp(intel_dp); 99} 100 101static void intel_dp_link_down(struct intel_dp *intel_dp); 102 103void 104intel_edp_link_config(struct intel_encoder *intel_encoder, 105 int *lane_num, int *link_bw) 106{ 107 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 108 109 *lane_num = intel_dp->lane_count; 110 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 111} 112 113int 114intel_edp_target_clock(struct intel_encoder *intel_encoder, 115 struct drm_display_mode *mode) 116{ 117 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 118 struct intel_connector *intel_connector = intel_dp->attached_connector; 119 120 if (intel_connector->panel.fixed_mode) 121 return intel_connector->panel.fixed_mode->clock; 122 else 123 return mode->clock; 124} 125 126static int 127intel_dp_max_link_bw(struct intel_dp *intel_dp) 128{ 129 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 130 131 switch (max_link_bw) { 132 case DP_LINK_BW_1_62: 133 case DP_LINK_BW_2_7: 134 break; 135 default: 136 max_link_bw = DP_LINK_BW_1_62; 137 break; 138 } 139 return max_link_bw; 140} 141 142static int 143intel_dp_link_clock(uint8_t link_bw) 144{ 145 if (link_bw == DP_LINK_BW_2_7) 146 return 270000; 147 else 148 return 162000; 149} 150 151/* 152 * The units on the numbers in the next two are... bizarre. Examples will 153 * make it clearer; this one parallels an example in the eDP spec. 154 * 155 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 156 * 157 * 270000 * 1 * 8 / 10 == 216000 158 * 159 * The actual data capacity of that configuration is 2.16Gbit/s, so the 160 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 161 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 162 * 119000. At 18bpp that's 2142000 kilobits per second. 163 * 164 * Thus the strange-looking division by 10 in intel_dp_link_required, to 165 * get the result in decakilobits instead of kilobits. 166 */ 167 168static int 169intel_dp_link_required(int pixel_clock, int bpp) 170{ 171 return (pixel_clock * bpp + 9) / 10; 172} 173 174static int 175intel_dp_max_data_rate(int max_link_clock, int max_lanes) 176{ 177 return (max_link_clock * max_lanes * 8) / 10; 178} 179 180static bool 181intel_dp_adjust_dithering(struct intel_dp *intel_dp, 182 struct drm_display_mode *mode, 183 bool adjust_mode) 184{ 185 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 186 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 187 int max_rate, mode_rate; 188 189 mode_rate = intel_dp_link_required(mode->clock, 24); 190 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 191 192 if (mode_rate > max_rate) { 193 mode_rate = intel_dp_link_required(mode->clock, 18); 194 if (mode_rate > max_rate) 195 return false; 196 197 if (adjust_mode) 198 mode->private_flags 199 |= INTEL_MODE_DP_FORCE_6BPC; 200 201 return true; 202 } 203 204 return true; 205} 206 207static int 208intel_dp_mode_valid(struct drm_connector *connector, 209 struct drm_display_mode *mode) 210{ 211 struct intel_dp *intel_dp = intel_attached_dp(connector); 212 struct intel_connector *intel_connector = to_intel_connector(connector); 213 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 214 215 if (is_edp(intel_dp) && fixed_mode) { 216 if (mode->hdisplay > fixed_mode->hdisplay) 217 return MODE_PANEL; 218 219 if (mode->vdisplay > fixed_mode->vdisplay) 220 return MODE_PANEL; 221 } 222 223 if (!intel_dp_adjust_dithering(intel_dp, mode, false)) 224 return MODE_CLOCK_HIGH; 225 226 if (mode->clock < 10000) 227 return MODE_CLOCK_LOW; 228 229 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 230 return MODE_H_ILLEGAL; 231 232 return MODE_OK; 233} 234 235static uint32_t 236pack_aux(uint8_t *src, int src_bytes) 237{ 238 int i; 239 uint32_t v = 0; 240 241 if (src_bytes > 4) 242 src_bytes = 4; 243 for (i = 0; i < src_bytes; i++) 244 v |= ((uint32_t) src[i]) << ((3-i) * 8); 245 return v; 246} 247 248static void 249unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 250{ 251 int i; 252 if (dst_bytes > 4) 253 dst_bytes = 4; 254 for (i = 0; i < dst_bytes; i++) 255 dst[i] = src >> ((3-i) * 8); 256} 257 258/* hrawclock is 1/4 the FSB frequency */ 259static int 260intel_hrawclk(struct drm_device *dev) 261{ 262 struct drm_i915_private *dev_priv = dev->dev_private; 263 uint32_t clkcfg; 264 265 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 266 if (IS_VALLEYVIEW(dev)) 267 return 200; 268 269 clkcfg = I915_READ(CLKCFG); 270 switch (clkcfg & CLKCFG_FSB_MASK) { 271 case CLKCFG_FSB_400: 272 return 100; 273 case CLKCFG_FSB_533: 274 return 133; 275 case CLKCFG_FSB_667: 276 return 166; 277 case CLKCFG_FSB_800: 278 return 200; 279 case CLKCFG_FSB_1067: 280 return 266; 281 case CLKCFG_FSB_1333: 282 return 333; 283 /* these two are just a guess; one of them might be right */ 284 case CLKCFG_FSB_1600: 285 case CLKCFG_FSB_1600_ALT: 286 return 400; 287 default: 288 return 133; 289 } 290} 291 292static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 293{ 294 struct drm_device *dev = intel_dp->base.base.dev; 295 struct drm_i915_private *dev_priv = dev->dev_private; 296 297 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; 298} 299 300static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 301{ 302 struct drm_device *dev = intel_dp->base.base.dev; 303 struct drm_i915_private *dev_priv = dev->dev_private; 304 305 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; 306} 307 308static void 309intel_dp_check_edp(struct intel_dp *intel_dp) 310{ 311 struct drm_device *dev = intel_dp->base.base.dev; 312 struct drm_i915_private *dev_priv = dev->dev_private; 313 314 if (!is_edp(intel_dp)) 315 return; 316 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 317 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 318 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 319 I915_READ(PCH_PP_STATUS), 320 I915_READ(PCH_PP_CONTROL)); 321 } 322} 323 324static int 325intel_dp_aux_ch(struct intel_dp *intel_dp, 326 uint8_t *send, int send_bytes, 327 uint8_t *recv, int recv_size) 328{ 329 uint32_t output_reg = intel_dp->output_reg; 330 struct drm_device *dev = intel_dp->base.base.dev; 331 struct drm_i915_private *dev_priv = dev->dev_private; 332 uint32_t ch_ctl = output_reg + 0x10; 333 uint32_t ch_data = ch_ctl + 4; 334 int i; 335 int recv_bytes; 336 uint32_t status; 337 uint32_t aux_clock_divider; 338 int try, precharge; 339 340 if (IS_HASWELL(dev)) { 341 switch (intel_dp->port) { 342 case PORT_A: 343 ch_ctl = DPA_AUX_CH_CTL; 344 ch_data = DPA_AUX_CH_DATA1; 345 break; 346 case PORT_B: 347 ch_ctl = PCH_DPB_AUX_CH_CTL; 348 ch_data = PCH_DPB_AUX_CH_DATA1; 349 break; 350 case PORT_C: 351 ch_ctl = PCH_DPC_AUX_CH_CTL; 352 ch_data = PCH_DPC_AUX_CH_DATA1; 353 break; 354 case PORT_D: 355 ch_ctl = PCH_DPD_AUX_CH_CTL; 356 ch_data = PCH_DPD_AUX_CH_DATA1; 357 break; 358 default: 359 BUG(); 360 } 361 } 362 363 intel_dp_check_edp(intel_dp); 364 /* The clock divider is based off the hrawclk, 365 * and would like to run at 2MHz. So, take the 366 * hrawclk value and divide by 2 and use that 367 * 368 * Note that PCH attached eDP panels should use a 125MHz input 369 * clock divider. 370 */ 371 if (is_cpu_edp(intel_dp)) { 372 if (IS_HASWELL(dev)) 373 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; 374 else if (IS_VALLEYVIEW(dev)) 375 aux_clock_divider = 100; 376 else if (IS_GEN6(dev) || IS_GEN7(dev)) 377 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 378 else 379 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 380 } else if (HAS_PCH_SPLIT(dev)) 381 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 382 else 383 aux_clock_divider = intel_hrawclk(dev) / 2; 384 385 if (IS_GEN6(dev)) 386 precharge = 3; 387 else 388 precharge = 5; 389 390 /* Try to wait for any previous AUX channel activity */ 391 for (try = 0; try < 3; try++) { 392 status = I915_READ(ch_ctl); 393 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 394 break; 395 msleep(1); 396 } 397 398 if (try == 3) { 399 WARN(1, "dp_aux_ch not started status 0x%08x\n", 400 I915_READ(ch_ctl)); 401 return -EBUSY; 402 } 403 404 /* Must try at least 3 times according to DP spec */ 405 for (try = 0; try < 5; try++) { 406 /* Load the send data into the aux channel data registers */ 407 for (i = 0; i < send_bytes; i += 4) 408 I915_WRITE(ch_data + i, 409 pack_aux(send + i, send_bytes - i)); 410 411 /* Send the command and wait for it to complete */ 412 I915_WRITE(ch_ctl, 413 DP_AUX_CH_CTL_SEND_BUSY | 414 DP_AUX_CH_CTL_TIME_OUT_400us | 415 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 416 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 417 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 418 DP_AUX_CH_CTL_DONE | 419 DP_AUX_CH_CTL_TIME_OUT_ERROR | 420 DP_AUX_CH_CTL_RECEIVE_ERROR); 421 for (;;) { 422 status = I915_READ(ch_ctl); 423 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 424 break; 425 udelay(100); 426 } 427 428 /* Clear done status and any errors */ 429 I915_WRITE(ch_ctl, 430 status | 431 DP_AUX_CH_CTL_DONE | 432 DP_AUX_CH_CTL_TIME_OUT_ERROR | 433 DP_AUX_CH_CTL_RECEIVE_ERROR); 434 435 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 436 DP_AUX_CH_CTL_RECEIVE_ERROR)) 437 continue; 438 if (status & DP_AUX_CH_CTL_DONE) 439 break; 440 } 441 442 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 443 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 444 return -EBUSY; 445 } 446 447 /* Check for timeout or receive error. 448 * Timeouts occur when the sink is not connected 449 */ 450 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 451 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 452 return -EIO; 453 } 454 455 /* Timeouts occur when the device isn't connected, so they're 456 * "normal" -- don't fill the kernel log with these */ 457 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 458 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 459 return -ETIMEDOUT; 460 } 461 462 /* Unload any bytes sent back from the other side */ 463 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 464 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 465 if (recv_bytes > recv_size) 466 recv_bytes = recv_size; 467 468 for (i = 0; i < recv_bytes; i += 4) 469 unpack_aux(I915_READ(ch_data + i), 470 recv + i, recv_bytes - i); 471 472 return recv_bytes; 473} 474 475/* Write data to the aux channel in native mode */ 476static int 477intel_dp_aux_native_write(struct intel_dp *intel_dp, 478 uint16_t address, uint8_t *send, int send_bytes) 479{ 480 int ret; 481 uint8_t msg[20]; 482 int msg_bytes; 483 uint8_t ack; 484 485 intel_dp_check_edp(intel_dp); 486 if (send_bytes > 16) 487 return -1; 488 msg[0] = AUX_NATIVE_WRITE << 4; 489 msg[1] = address >> 8; 490 msg[2] = address & 0xff; 491 msg[3] = send_bytes - 1; 492 memcpy(&msg[4], send, send_bytes); 493 msg_bytes = send_bytes + 4; 494 for (;;) { 495 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 496 if (ret < 0) 497 return ret; 498 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 499 break; 500 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 501 udelay(100); 502 else 503 return -EIO; 504 } 505 return send_bytes; 506} 507 508/* Write a single byte to the aux channel in native mode */ 509static int 510intel_dp_aux_native_write_1(struct intel_dp *intel_dp, 511 uint16_t address, uint8_t byte) 512{ 513 return intel_dp_aux_native_write(intel_dp, address, &byte, 1); 514} 515 516/* read bytes from a native aux channel */ 517static int 518intel_dp_aux_native_read(struct intel_dp *intel_dp, 519 uint16_t address, uint8_t *recv, int recv_bytes) 520{ 521 uint8_t msg[4]; 522 int msg_bytes; 523 uint8_t reply[20]; 524 int reply_bytes; 525 uint8_t ack; 526 int ret; 527 528 intel_dp_check_edp(intel_dp); 529 msg[0] = AUX_NATIVE_READ << 4; 530 msg[1] = address >> 8; 531 msg[2] = address & 0xff; 532 msg[3] = recv_bytes - 1; 533 534 msg_bytes = 4; 535 reply_bytes = recv_bytes + 1; 536 537 for (;;) { 538 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 539 reply, reply_bytes); 540 if (ret == 0) 541 return -EPROTO; 542 if (ret < 0) 543 return ret; 544 ack = reply[0]; 545 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { 546 memcpy(recv, reply + 1, ret - 1); 547 return ret - 1; 548 } 549 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 550 udelay(100); 551 else 552 return -EIO; 553 } 554} 555 556static int 557intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 558 uint8_t write_byte, uint8_t *read_byte) 559{ 560 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 561 struct intel_dp *intel_dp = container_of(adapter, 562 struct intel_dp, 563 adapter); 564 uint16_t address = algo_data->address; 565 uint8_t msg[5]; 566 uint8_t reply[2]; 567 unsigned retry; 568 int msg_bytes; 569 int reply_bytes; 570 int ret; 571 572 intel_dp_check_edp(intel_dp); 573 /* Set up the command byte */ 574 if (mode & MODE_I2C_READ) 575 msg[0] = AUX_I2C_READ << 4; 576 else 577 msg[0] = AUX_I2C_WRITE << 4; 578 579 if (!(mode & MODE_I2C_STOP)) 580 msg[0] |= AUX_I2C_MOT << 4; 581 582 msg[1] = address >> 8; 583 msg[2] = address; 584 585 switch (mode) { 586 case MODE_I2C_WRITE: 587 msg[3] = 0; 588 msg[4] = write_byte; 589 msg_bytes = 5; 590 reply_bytes = 1; 591 break; 592 case MODE_I2C_READ: 593 msg[3] = 0; 594 msg_bytes = 4; 595 reply_bytes = 2; 596 break; 597 default: 598 msg_bytes = 3; 599 reply_bytes = 1; 600 break; 601 } 602 603 for (retry = 0; retry < 5; retry++) { 604 ret = intel_dp_aux_ch(intel_dp, 605 msg, msg_bytes, 606 reply, reply_bytes); 607 if (ret < 0) { 608 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 609 return ret; 610 } 611 612 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 613 case AUX_NATIVE_REPLY_ACK: 614 /* I2C-over-AUX Reply field is only valid 615 * when paired with AUX ACK. 616 */ 617 break; 618 case AUX_NATIVE_REPLY_NACK: 619 DRM_DEBUG_KMS("aux_ch native nack\n"); 620 return -EREMOTEIO; 621 case AUX_NATIVE_REPLY_DEFER: 622 udelay(100); 623 continue; 624 default: 625 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 626 reply[0]); 627 return -EREMOTEIO; 628 } 629 630 switch (reply[0] & AUX_I2C_REPLY_MASK) { 631 case AUX_I2C_REPLY_ACK: 632 if (mode == MODE_I2C_READ) { 633 *read_byte = reply[1]; 634 } 635 return reply_bytes - 1; 636 case AUX_I2C_REPLY_NACK: 637 DRM_DEBUG_KMS("aux_i2c nack\n"); 638 return -EREMOTEIO; 639 case AUX_I2C_REPLY_DEFER: 640 DRM_DEBUG_KMS("aux_i2c defer\n"); 641 udelay(100); 642 break; 643 default: 644 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 645 return -EREMOTEIO; 646 } 647 } 648 649 DRM_ERROR("too many retries, giving up\n"); 650 return -EREMOTEIO; 651} 652 653static int 654intel_dp_i2c_init(struct intel_dp *intel_dp, 655 struct intel_connector *intel_connector, const char *name) 656{ 657 int ret; 658 659 DRM_DEBUG_KMS("i2c_init %s\n", name); 660 intel_dp->algo.running = false; 661 intel_dp->algo.address = 0; 662 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; 663 664 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); 665 intel_dp->adapter.owner = THIS_MODULE; 666 intel_dp->adapter.class = I2C_CLASS_DDC; 667 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 668 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 669 intel_dp->adapter.algo_data = &intel_dp->algo; 670 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 671 672 ironlake_edp_panel_vdd_on(intel_dp); 673 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 674 ironlake_edp_panel_vdd_off(intel_dp, false); 675 return ret; 676} 677 678static bool 679intel_dp_mode_fixup(struct drm_encoder *encoder, 680 const struct drm_display_mode *mode, 681 struct drm_display_mode *adjusted_mode) 682{ 683 struct drm_device *dev = encoder->dev; 684 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 685 struct intel_connector *intel_connector = intel_dp->attached_connector; 686 int lane_count, clock; 687 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 688 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 689 int bpp, mode_rate; 690 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 691 692 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 693 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 694 adjusted_mode); 695 intel_pch_panel_fitting(dev, 696 intel_connector->panel.fitting_mode, 697 mode, adjusted_mode); 698 } 699 700 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 701 return false; 702 703 DRM_DEBUG_KMS("DP link computation with max lane count %i " 704 "max bw %02x pixel clock %iKHz\n", 705 max_lane_count, bws[max_clock], adjusted_mode->clock); 706 707 if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) 708 return false; 709 710 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; 711 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 712 713 for (clock = 0; clock <= max_clock; clock++) { 714 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 715 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 716 717 if (mode_rate <= link_avail) { 718 intel_dp->link_bw = bws[clock]; 719 intel_dp->lane_count = lane_count; 720 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 721 DRM_DEBUG_KMS("DP link bw %02x lane " 722 "count %d clock %d bpp %d\n", 723 intel_dp->link_bw, intel_dp->lane_count, 724 adjusted_mode->clock, bpp); 725 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 726 mode_rate, link_avail); 727 return true; 728 } 729 } 730 } 731 732 return false; 733} 734 735struct intel_dp_m_n { 736 uint32_t tu; 737 uint32_t gmch_m; 738 uint32_t gmch_n; 739 uint32_t link_m; 740 uint32_t link_n; 741}; 742 743static void 744intel_reduce_ratio(uint32_t *num, uint32_t *den) 745{ 746 while (*num > 0xffffff || *den > 0xffffff) { 747 *num >>= 1; 748 *den >>= 1; 749 } 750} 751 752static void 753intel_dp_compute_m_n(int bpp, 754 int nlanes, 755 int pixel_clock, 756 int link_clock, 757 struct intel_dp_m_n *m_n) 758{ 759 m_n->tu = 64; 760 m_n->gmch_m = (pixel_clock * bpp) >> 3; 761 m_n->gmch_n = link_clock * nlanes; 762 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 763 m_n->link_m = pixel_clock; 764 m_n->link_n = link_clock; 765 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 766} 767 768void 769intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 770 struct drm_display_mode *adjusted_mode) 771{ 772 struct drm_device *dev = crtc->dev; 773 struct intel_encoder *intel_encoder; 774 struct intel_dp *intel_dp; 775 struct drm_i915_private *dev_priv = dev->dev_private; 776 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 777 int lane_count = 4; 778 struct intel_dp_m_n m_n; 779 int pipe = intel_crtc->pipe; 780 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 781 782 /* 783 * Find the lane count in the intel_encoder private 784 */ 785 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 786 intel_dp = enc_to_intel_dp(&intel_encoder->base); 787 788 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 789 intel_encoder->type == INTEL_OUTPUT_EDP) 790 { 791 lane_count = intel_dp->lane_count; 792 break; 793 } 794 } 795 796 /* 797 * Compute the GMCH and Link ratios. The '3' here is 798 * the number of bytes_per_pixel post-LUT, which we always 799 * set up for 8-bits of R/G/B, or 3 bytes total. 800 */ 801 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 802 mode->clock, adjusted_mode->clock, &m_n); 803 804 if (IS_HASWELL(dev)) { 805 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), 806 TU_SIZE(m_n.tu) | m_n.gmch_m); 807 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); 808 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); 809 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); 810 } else if (HAS_PCH_SPLIT(dev)) { 811 I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 812 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); 813 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); 814 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); 815 } else if (IS_VALLEYVIEW(dev)) { 816 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 817 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); 818 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 819 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); 820 } else { 821 I915_WRITE(PIPE_GMCH_DATA_M(pipe), 822 TU_SIZE(m_n.tu) | m_n.gmch_m); 823 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); 824 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); 825 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); 826 } 827} 828 829void intel_dp_init_link_config(struct intel_dp *intel_dp) 830{ 831 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 832 intel_dp->link_configuration[0] = intel_dp->link_bw; 833 intel_dp->link_configuration[1] = intel_dp->lane_count; 834 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 835 /* 836 * Check for DPCD version > 1.1 and enhanced framing support 837 */ 838 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 839 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 840 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 841 } 842} 843 844static void 845intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 846 struct drm_display_mode *adjusted_mode) 847{ 848 struct drm_device *dev = encoder->dev; 849 struct drm_i915_private *dev_priv = dev->dev_private; 850 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 851 struct drm_crtc *crtc = encoder->crtc; 852 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 853 854 /* 855 * There are four kinds of DP registers: 856 * 857 * IBX PCH 858 * SNB CPU 859 * IVB CPU 860 * CPT PCH 861 * 862 * IBX PCH and CPU are the same for almost everything, 863 * except that the CPU DP PLL is configured in this 864 * register 865 * 866 * CPT PCH is quite different, having many bits moved 867 * to the TRANS_DP_CTL register instead. That 868 * configuration happens (oddly) in ironlake_pch_enable 869 */ 870 871 /* Preserve the BIOS-computed detected bit. This is 872 * supposed to be read-only. 873 */ 874 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 875 876 /* Handle DP bits in common between all three register formats */ 877 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 878 879 switch (intel_dp->lane_count) { 880 case 1: 881 intel_dp->DP |= DP_PORT_WIDTH_1; 882 break; 883 case 2: 884 intel_dp->DP |= DP_PORT_WIDTH_2; 885 break; 886 case 4: 887 intel_dp->DP |= DP_PORT_WIDTH_4; 888 break; 889 } 890 if (intel_dp->has_audio) { 891 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 892 pipe_name(intel_crtc->pipe)); 893 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 894 intel_write_eld(encoder, adjusted_mode); 895 } 896 897 intel_dp_init_link_config(intel_dp); 898 899 /* Split out the IBX/CPU vs CPT settings */ 900 901 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 902 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 903 intel_dp->DP |= DP_SYNC_HS_HIGH; 904 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 905 intel_dp->DP |= DP_SYNC_VS_HIGH; 906 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 907 908 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 909 intel_dp->DP |= DP_ENHANCED_FRAMING; 910 911 intel_dp->DP |= intel_crtc->pipe << 29; 912 913 /* don't miss out required setting for eDP */ 914 if (adjusted_mode->clock < 200000) 915 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 916 else 917 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 918 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 919 intel_dp->DP |= intel_dp->color_range; 920 921 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 922 intel_dp->DP |= DP_SYNC_HS_HIGH; 923 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 924 intel_dp->DP |= DP_SYNC_VS_HIGH; 925 intel_dp->DP |= DP_LINK_TRAIN_OFF; 926 927 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 928 intel_dp->DP |= DP_ENHANCED_FRAMING; 929 930 if (intel_crtc->pipe == 1) 931 intel_dp->DP |= DP_PIPEB_SELECT; 932 933 if (is_cpu_edp(intel_dp)) { 934 /* don't miss out required setting for eDP */ 935 if (adjusted_mode->clock < 200000) 936 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 937 else 938 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 939 } 940 } else { 941 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 942 } 943} 944 945#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 946#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 947 948#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 949#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 950 951#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 952#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 953 954static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 955 u32 mask, 956 u32 value) 957{ 958 struct drm_device *dev = intel_dp->base.base.dev; 959 struct drm_i915_private *dev_priv = dev->dev_private; 960 961 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 962 mask, value, 963 I915_READ(PCH_PP_STATUS), 964 I915_READ(PCH_PP_CONTROL)); 965 966 if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) { 967 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 968 I915_READ(PCH_PP_STATUS), 969 I915_READ(PCH_PP_CONTROL)); 970 } 971} 972 973static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 974{ 975 DRM_DEBUG_KMS("Wait for panel power on\n"); 976 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 977} 978 979static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 980{ 981 DRM_DEBUG_KMS("Wait for panel power off time\n"); 982 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 983} 984 985static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 986{ 987 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 988 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 989} 990 991 992/* Read the current pp_control value, unlocking the register if it 993 * is locked 994 */ 995 996static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) 997{ 998 u32 control = I915_READ(PCH_PP_CONTROL); 999 1000 control &= ~PANEL_UNLOCK_MASK; 1001 control |= PANEL_UNLOCK_REGS; 1002 return control; 1003} 1004 1005void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 1006{ 1007 struct drm_device *dev = intel_dp->base.base.dev; 1008 struct drm_i915_private *dev_priv = dev->dev_private; 1009 u32 pp; 1010 1011 if (!is_edp(intel_dp)) 1012 return; 1013 DRM_DEBUG_KMS("Turn eDP VDD on\n"); 1014 1015 WARN(intel_dp->want_panel_vdd, 1016 "eDP VDD already requested on\n"); 1017 1018 intel_dp->want_panel_vdd = true; 1019 1020 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1021 DRM_DEBUG_KMS("eDP VDD already on\n"); 1022 return; 1023 } 1024 1025 if (!ironlake_edp_have_panel_power(intel_dp)) 1026 ironlake_wait_panel_power_cycle(intel_dp); 1027 1028 pp = ironlake_get_pp_control(dev_priv); 1029 pp |= EDP_FORCE_VDD; 1030 I915_WRITE(PCH_PP_CONTROL, pp); 1031 POSTING_READ(PCH_PP_CONTROL); 1032 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1033 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1034 1035 /* 1036 * If the panel wasn't on, delay before accessing aux channel 1037 */ 1038 if (!ironlake_edp_have_panel_power(intel_dp)) { 1039 DRM_DEBUG_KMS("eDP was not running\n"); 1040 msleep(intel_dp->panel_power_up_delay); 1041 } 1042} 1043 1044static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1045{ 1046 struct drm_device *dev = intel_dp->base.base.dev; 1047 struct drm_i915_private *dev_priv = dev->dev_private; 1048 u32 pp; 1049 1050 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1051 pp = ironlake_get_pp_control(dev_priv); 1052 pp &= ~EDP_FORCE_VDD; 1053 I915_WRITE(PCH_PP_CONTROL, pp); 1054 POSTING_READ(PCH_PP_CONTROL); 1055 1056 /* Make sure sequencer is idle before allowing subsequent activity */ 1057 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1058 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1059 1060 msleep(intel_dp->panel_power_down_delay); 1061 } 1062} 1063 1064static void ironlake_panel_vdd_work(struct work_struct *__work) 1065{ 1066 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1067 struct intel_dp, panel_vdd_work); 1068 struct drm_device *dev = intel_dp->base.base.dev; 1069 1070 mutex_lock(&dev->mode_config.mutex); 1071 ironlake_panel_vdd_off_sync(intel_dp); 1072 mutex_unlock(&dev->mode_config.mutex); 1073} 1074 1075void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1076{ 1077 if (!is_edp(intel_dp)) 1078 return; 1079 1080 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1081 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1082 1083 intel_dp->want_panel_vdd = false; 1084 1085 if (sync) { 1086 ironlake_panel_vdd_off_sync(intel_dp); 1087 } else { 1088 /* 1089 * Queue the timer to fire a long 1090 * time from now (relative to the power down delay) 1091 * to keep the panel power up across a sequence of operations 1092 */ 1093 schedule_delayed_work(&intel_dp->panel_vdd_work, 1094 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); 1095 } 1096} 1097 1098void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1099{ 1100 struct drm_device *dev = intel_dp->base.base.dev; 1101 struct drm_i915_private *dev_priv = dev->dev_private; 1102 u32 pp; 1103 1104 if (!is_edp(intel_dp)) 1105 return; 1106 1107 DRM_DEBUG_KMS("Turn eDP power on\n"); 1108 1109 if (ironlake_edp_have_panel_power(intel_dp)) { 1110 DRM_DEBUG_KMS("eDP power already on\n"); 1111 return; 1112 } 1113 1114 ironlake_wait_panel_power_cycle(intel_dp); 1115 1116 pp = ironlake_get_pp_control(dev_priv); 1117 if (IS_GEN5(dev)) { 1118 /* ILK workaround: disable reset around power sequence */ 1119 pp &= ~PANEL_POWER_RESET; 1120 I915_WRITE(PCH_PP_CONTROL, pp); 1121 POSTING_READ(PCH_PP_CONTROL); 1122 } 1123 1124 pp |= POWER_TARGET_ON; 1125 if (!IS_GEN5(dev)) 1126 pp |= PANEL_POWER_RESET; 1127 1128 I915_WRITE(PCH_PP_CONTROL, pp); 1129 POSTING_READ(PCH_PP_CONTROL); 1130 1131 ironlake_wait_panel_on(intel_dp); 1132 1133 if (IS_GEN5(dev)) { 1134 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1135 I915_WRITE(PCH_PP_CONTROL, pp); 1136 POSTING_READ(PCH_PP_CONTROL); 1137 } 1138} 1139 1140void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1141{ 1142 struct drm_device *dev = intel_dp->base.base.dev; 1143 struct drm_i915_private *dev_priv = dev->dev_private; 1144 u32 pp; 1145 1146 if (!is_edp(intel_dp)) 1147 return; 1148 1149 DRM_DEBUG_KMS("Turn eDP power off\n"); 1150 1151 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1152 1153 pp = ironlake_get_pp_control(dev_priv); 1154 /* We need to switch off panel power _and_ force vdd, for otherwise some 1155 * panels get very unhappy and cease to work. */ 1156 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1157 I915_WRITE(PCH_PP_CONTROL, pp); 1158 POSTING_READ(PCH_PP_CONTROL); 1159 1160 intel_dp->want_panel_vdd = false; 1161 1162 ironlake_wait_panel_off(intel_dp); 1163} 1164 1165void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1166{ 1167 struct drm_device *dev = intel_dp->base.base.dev; 1168 struct drm_i915_private *dev_priv = dev->dev_private; 1169 int pipe = to_intel_crtc(intel_dp->base.base.crtc)->pipe; 1170 u32 pp; 1171 1172 if (!is_edp(intel_dp)) 1173 return; 1174 1175 DRM_DEBUG_KMS("\n"); 1176 /* 1177 * If we enable the backlight right away following a panel power 1178 * on, we may see slight flicker as the panel syncs with the eDP 1179 * link. So delay a bit to make sure the image is solid before 1180 * allowing it to appear. 1181 */ 1182 msleep(intel_dp->backlight_on_delay); 1183 pp = ironlake_get_pp_control(dev_priv); 1184 pp |= EDP_BLC_ENABLE; 1185 I915_WRITE(PCH_PP_CONTROL, pp); 1186 POSTING_READ(PCH_PP_CONTROL); 1187 1188 intel_panel_enable_backlight(dev, pipe); 1189} 1190 1191void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1192{ 1193 struct drm_device *dev = intel_dp->base.base.dev; 1194 struct drm_i915_private *dev_priv = dev->dev_private; 1195 u32 pp; 1196 1197 if (!is_edp(intel_dp)) 1198 return; 1199 1200 intel_panel_disable_backlight(dev); 1201 1202 DRM_DEBUG_KMS("\n"); 1203 pp = ironlake_get_pp_control(dev_priv); 1204 pp &= ~EDP_BLC_ENABLE; 1205 I915_WRITE(PCH_PP_CONTROL, pp); 1206 POSTING_READ(PCH_PP_CONTROL); 1207 msleep(intel_dp->backlight_off_delay); 1208} 1209 1210static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1211{ 1212 struct drm_device *dev = intel_dp->base.base.dev; 1213 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1214 struct drm_i915_private *dev_priv = dev->dev_private; 1215 u32 dpa_ctl; 1216 1217 assert_pipe_disabled(dev_priv, 1218 to_intel_crtc(crtc)->pipe); 1219 1220 DRM_DEBUG_KMS("\n"); 1221 dpa_ctl = I915_READ(DP_A); 1222 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); 1223 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 1224 1225 /* We don't adjust intel_dp->DP while tearing down the link, to 1226 * facilitate link retraining (e.g. after hotplug). Hence clear all 1227 * enable bits here to ensure that we don't enable too much. */ 1228 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 1229 intel_dp->DP |= DP_PLL_ENABLE; 1230 I915_WRITE(DP_A, intel_dp->DP); 1231 POSTING_READ(DP_A); 1232 udelay(200); 1233} 1234 1235static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 1236{ 1237 struct drm_device *dev = intel_dp->base.base.dev; 1238 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1239 struct drm_i915_private *dev_priv = dev->dev_private; 1240 u32 dpa_ctl; 1241 1242 assert_pipe_disabled(dev_priv, 1243 to_intel_crtc(crtc)->pipe); 1244 1245 dpa_ctl = I915_READ(DP_A); 1246 WARN((dpa_ctl & DP_PLL_ENABLE) == 0, 1247 "dp pll off, should be on\n"); 1248 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 1249 1250 /* We can't rely on the value tracked for the DP register in 1251 * intel_dp->DP because link_down must not change that (otherwise link 1252 * re-training will fail. */ 1253 dpa_ctl &= ~DP_PLL_ENABLE; 1254 I915_WRITE(DP_A, dpa_ctl); 1255 POSTING_READ(DP_A); 1256 udelay(200); 1257} 1258 1259/* If the sink supports it, try to set the power state appropriately */ 1260void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 1261{ 1262 int ret, i; 1263 1264 /* Should have a valid DPCD by this point */ 1265 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1266 return; 1267 1268 if (mode != DRM_MODE_DPMS_ON) { 1269 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, 1270 DP_SET_POWER_D3); 1271 if (ret != 1) 1272 DRM_DEBUG_DRIVER("failed to write sink power state\n"); 1273 } else { 1274 /* 1275 * When turning on, we need to retry for 1ms to give the sink 1276 * time to wake up. 1277 */ 1278 for (i = 0; i < 3; i++) { 1279 ret = intel_dp_aux_native_write_1(intel_dp, 1280 DP_SET_POWER, 1281 DP_SET_POWER_D0); 1282 if (ret == 1) 1283 break; 1284 msleep(1); 1285 } 1286 } 1287} 1288 1289static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 1290 enum pipe *pipe) 1291{ 1292 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1293 struct drm_device *dev = encoder->base.dev; 1294 struct drm_i915_private *dev_priv = dev->dev_private; 1295 u32 tmp = I915_READ(intel_dp->output_reg); 1296 1297 if (!(tmp & DP_PORT_EN)) 1298 return false; 1299 1300 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { 1301 *pipe = PORT_TO_PIPE_CPT(tmp); 1302 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 1303 *pipe = PORT_TO_PIPE(tmp); 1304 } else { 1305 u32 trans_sel; 1306 u32 trans_dp; 1307 int i; 1308 1309 switch (intel_dp->output_reg) { 1310 case PCH_DP_B: 1311 trans_sel = TRANS_DP_PORT_SEL_B; 1312 break; 1313 case PCH_DP_C: 1314 trans_sel = TRANS_DP_PORT_SEL_C; 1315 break; 1316 case PCH_DP_D: 1317 trans_sel = TRANS_DP_PORT_SEL_D; 1318 break; 1319 default: 1320 return true; 1321 } 1322 1323 for_each_pipe(i) { 1324 trans_dp = I915_READ(TRANS_DP_CTL(i)); 1325 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { 1326 *pipe = i; 1327 return true; 1328 } 1329 } 1330 1331 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", 1332 intel_dp->output_reg); 1333 } 1334 1335 return true; 1336} 1337 1338static void intel_disable_dp(struct intel_encoder *encoder) 1339{ 1340 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1341 1342 /* Make sure the panel is off before trying to change the mode. But also 1343 * ensure that we have vdd while we switch off the panel. */ 1344 ironlake_edp_panel_vdd_on(intel_dp); 1345 ironlake_edp_backlight_off(intel_dp); 1346 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1347 ironlake_edp_panel_off(intel_dp); 1348 1349 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1350 if (!is_cpu_edp(intel_dp)) 1351 intel_dp_link_down(intel_dp); 1352} 1353 1354static void intel_post_disable_dp(struct intel_encoder *encoder) 1355{ 1356 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1357 1358 if (is_cpu_edp(intel_dp)) { 1359 intel_dp_link_down(intel_dp); 1360 ironlake_edp_pll_off(intel_dp); 1361 } 1362} 1363 1364static void intel_enable_dp(struct intel_encoder *encoder) 1365{ 1366 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1367 struct drm_device *dev = encoder->base.dev; 1368 struct drm_i915_private *dev_priv = dev->dev_private; 1369 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1370 1371 if (WARN_ON(dp_reg & DP_PORT_EN)) 1372 return; 1373 1374 ironlake_edp_panel_vdd_on(intel_dp); 1375 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1376 intel_dp_start_link_train(intel_dp); 1377 ironlake_edp_panel_on(intel_dp); 1378 ironlake_edp_panel_vdd_off(intel_dp, true); 1379 intel_dp_complete_link_train(intel_dp); 1380 ironlake_edp_backlight_on(intel_dp); 1381} 1382 1383static void intel_pre_enable_dp(struct intel_encoder *encoder) 1384{ 1385 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1386 1387 if (is_cpu_edp(intel_dp)) 1388 ironlake_edp_pll_on(intel_dp); 1389} 1390 1391/* 1392 * Native read with retry for link status and receiver capability reads for 1393 * cases where the sink may still be asleep. 1394 */ 1395static bool 1396intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1397 uint8_t *recv, int recv_bytes) 1398{ 1399 int ret, i; 1400 1401 /* 1402 * Sinks are *supposed* to come up within 1ms from an off state, 1403 * but we're also supposed to retry 3 times per the spec. 1404 */ 1405 for (i = 0; i < 3; i++) { 1406 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1407 recv_bytes); 1408 if (ret == recv_bytes) 1409 return true; 1410 msleep(1); 1411 } 1412 1413 return false; 1414} 1415 1416/* 1417 * Fetch AUX CH registers 0x202 - 0x207 which contain 1418 * link status information 1419 */ 1420static bool 1421intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1422{ 1423 return intel_dp_aux_native_read_retry(intel_dp, 1424 DP_LANE0_1_STATUS, 1425 link_status, 1426 DP_LINK_STATUS_SIZE); 1427} 1428 1429#if 0 1430static char *voltage_names[] = { 1431 "0.4V", "0.6V", "0.8V", "1.2V" 1432}; 1433static char *pre_emph_names[] = { 1434 "0dB", "3.5dB", "6dB", "9.5dB" 1435}; 1436static char *link_train_names[] = { 1437 "pattern 1", "pattern 2", "idle", "off" 1438}; 1439#endif 1440 1441/* 1442 * These are source-specific values; current Intel hardware supports 1443 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1444 */ 1445 1446static uint8_t 1447intel_dp_voltage_max(struct intel_dp *intel_dp) 1448{ 1449 struct drm_device *dev = intel_dp->base.base.dev; 1450 1451 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1452 return DP_TRAIN_VOLTAGE_SWING_800; 1453 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1454 return DP_TRAIN_VOLTAGE_SWING_1200; 1455 else 1456 return DP_TRAIN_VOLTAGE_SWING_800; 1457} 1458 1459static uint8_t 1460intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1461{ 1462 struct drm_device *dev = intel_dp->base.base.dev; 1463 1464 if (IS_HASWELL(dev)) { 1465 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1466 case DP_TRAIN_VOLTAGE_SWING_400: 1467 return DP_TRAIN_PRE_EMPHASIS_9_5; 1468 case DP_TRAIN_VOLTAGE_SWING_600: 1469 return DP_TRAIN_PRE_EMPHASIS_6; 1470 case DP_TRAIN_VOLTAGE_SWING_800: 1471 return DP_TRAIN_PRE_EMPHASIS_3_5; 1472 case DP_TRAIN_VOLTAGE_SWING_1200: 1473 default: 1474 return DP_TRAIN_PRE_EMPHASIS_0; 1475 } 1476 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1477 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1478 case DP_TRAIN_VOLTAGE_SWING_400: 1479 return DP_TRAIN_PRE_EMPHASIS_6; 1480 case DP_TRAIN_VOLTAGE_SWING_600: 1481 case DP_TRAIN_VOLTAGE_SWING_800: 1482 return DP_TRAIN_PRE_EMPHASIS_3_5; 1483 default: 1484 return DP_TRAIN_PRE_EMPHASIS_0; 1485 } 1486 } else { 1487 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1488 case DP_TRAIN_VOLTAGE_SWING_400: 1489 return DP_TRAIN_PRE_EMPHASIS_6; 1490 case DP_TRAIN_VOLTAGE_SWING_600: 1491 return DP_TRAIN_PRE_EMPHASIS_6; 1492 case DP_TRAIN_VOLTAGE_SWING_800: 1493 return DP_TRAIN_PRE_EMPHASIS_3_5; 1494 case DP_TRAIN_VOLTAGE_SWING_1200: 1495 default: 1496 return DP_TRAIN_PRE_EMPHASIS_0; 1497 } 1498 } 1499} 1500 1501static void 1502intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1503{ 1504 uint8_t v = 0; 1505 uint8_t p = 0; 1506 int lane; 1507 uint8_t voltage_max; 1508 uint8_t preemph_max; 1509 1510 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1511 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); 1512 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); 1513 1514 if (this_v > v) 1515 v = this_v; 1516 if (this_p > p) 1517 p = this_p; 1518 } 1519 1520 voltage_max = intel_dp_voltage_max(intel_dp); 1521 if (v >= voltage_max) 1522 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 1523 1524 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 1525 if (p >= preemph_max) 1526 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1527 1528 for (lane = 0; lane < 4; lane++) 1529 intel_dp->train_set[lane] = v | p; 1530} 1531 1532static uint32_t 1533intel_dp_signal_levels(uint8_t train_set) 1534{ 1535 uint32_t signal_levels = 0; 1536 1537 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1538 case DP_TRAIN_VOLTAGE_SWING_400: 1539 default: 1540 signal_levels |= DP_VOLTAGE_0_4; 1541 break; 1542 case DP_TRAIN_VOLTAGE_SWING_600: 1543 signal_levels |= DP_VOLTAGE_0_6; 1544 break; 1545 case DP_TRAIN_VOLTAGE_SWING_800: 1546 signal_levels |= DP_VOLTAGE_0_8; 1547 break; 1548 case DP_TRAIN_VOLTAGE_SWING_1200: 1549 signal_levels |= DP_VOLTAGE_1_2; 1550 break; 1551 } 1552 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1553 case DP_TRAIN_PRE_EMPHASIS_0: 1554 default: 1555 signal_levels |= DP_PRE_EMPHASIS_0; 1556 break; 1557 case DP_TRAIN_PRE_EMPHASIS_3_5: 1558 signal_levels |= DP_PRE_EMPHASIS_3_5; 1559 break; 1560 case DP_TRAIN_PRE_EMPHASIS_6: 1561 signal_levels |= DP_PRE_EMPHASIS_6; 1562 break; 1563 case DP_TRAIN_PRE_EMPHASIS_9_5: 1564 signal_levels |= DP_PRE_EMPHASIS_9_5; 1565 break; 1566 } 1567 return signal_levels; 1568} 1569 1570/* Gen6's DP voltage swing and pre-emphasis control */ 1571static uint32_t 1572intel_gen6_edp_signal_levels(uint8_t train_set) 1573{ 1574 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1575 DP_TRAIN_PRE_EMPHASIS_MASK); 1576 switch (signal_levels) { 1577 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1578 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1579 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1580 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1581 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 1582 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1583 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1584 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 1585 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1586 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1587 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 1588 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1589 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 1590 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 1591 default: 1592 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1593 "0x%x\n", signal_levels); 1594 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1595 } 1596} 1597 1598/* Gen7's DP voltage swing and pre-emphasis control */ 1599static uint32_t 1600intel_gen7_edp_signal_levels(uint8_t train_set) 1601{ 1602 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1603 DP_TRAIN_PRE_EMPHASIS_MASK); 1604 switch (signal_levels) { 1605 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1606 return EDP_LINK_TRAIN_400MV_0DB_IVB; 1607 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1608 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 1609 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1610 return EDP_LINK_TRAIN_400MV_6DB_IVB; 1611 1612 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1613 return EDP_LINK_TRAIN_600MV_0DB_IVB; 1614 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1615 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 1616 1617 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1618 return EDP_LINK_TRAIN_800MV_0DB_IVB; 1619 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1620 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 1621 1622 default: 1623 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1624 "0x%x\n", signal_levels); 1625 return EDP_LINK_TRAIN_500MV_0DB_IVB; 1626 } 1627} 1628 1629/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1630static uint32_t 1631intel_dp_signal_levels_hsw(uint8_t train_set) 1632{ 1633 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1634 DP_TRAIN_PRE_EMPHASIS_MASK); 1635 switch (signal_levels) { 1636 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1637 return DDI_BUF_EMP_400MV_0DB_HSW; 1638 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1639 return DDI_BUF_EMP_400MV_3_5DB_HSW; 1640 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1641 return DDI_BUF_EMP_400MV_6DB_HSW; 1642 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: 1643 return DDI_BUF_EMP_400MV_9_5DB_HSW; 1644 1645 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1646 return DDI_BUF_EMP_600MV_0DB_HSW; 1647 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1648 return DDI_BUF_EMP_600MV_3_5DB_HSW; 1649 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1650 return DDI_BUF_EMP_600MV_6DB_HSW; 1651 1652 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1653 return DDI_BUF_EMP_800MV_0DB_HSW; 1654 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1655 return DDI_BUF_EMP_800MV_3_5DB_HSW; 1656 default: 1657 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1658 "0x%x\n", signal_levels); 1659 return DDI_BUF_EMP_400MV_0DB_HSW; 1660 } 1661} 1662 1663static bool 1664intel_dp_set_link_train(struct intel_dp *intel_dp, 1665 uint32_t dp_reg_value, 1666 uint8_t dp_train_pat) 1667{ 1668 struct drm_device *dev = intel_dp->base.base.dev; 1669 struct drm_i915_private *dev_priv = dev->dev_private; 1670 int ret; 1671 uint32_t temp; 1672 1673 if (IS_HASWELL(dev)) { 1674 temp = I915_READ(DP_TP_CTL(intel_dp->port)); 1675 1676 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 1677 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 1678 else 1679 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 1680 1681 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1682 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1683 case DP_TRAINING_PATTERN_DISABLE: 1684 temp |= DP_TP_CTL_LINK_TRAIN_IDLE; 1685 I915_WRITE(DP_TP_CTL(intel_dp->port), temp); 1686 1687 if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) & 1688 DP_TP_STATUS_IDLE_DONE), 1)) 1689 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 1690 1691 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1692 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1693 1694 break; 1695 case DP_TRAINING_PATTERN_1: 1696 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 1697 break; 1698 case DP_TRAINING_PATTERN_2: 1699 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 1700 break; 1701 case DP_TRAINING_PATTERN_3: 1702 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 1703 break; 1704 } 1705 I915_WRITE(DP_TP_CTL(intel_dp->port), temp); 1706 1707 } else if (HAS_PCH_CPT(dev) && 1708 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1709 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 1710 1711 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1712 case DP_TRAINING_PATTERN_DISABLE: 1713 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 1714 break; 1715 case DP_TRAINING_PATTERN_1: 1716 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 1717 break; 1718 case DP_TRAINING_PATTERN_2: 1719 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1720 break; 1721 case DP_TRAINING_PATTERN_3: 1722 DRM_ERROR("DP training pattern 3 not supported\n"); 1723 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 1724 break; 1725 } 1726 1727 } else { 1728 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 1729 1730 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1731 case DP_TRAINING_PATTERN_DISABLE: 1732 dp_reg_value |= DP_LINK_TRAIN_OFF; 1733 break; 1734 case DP_TRAINING_PATTERN_1: 1735 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 1736 break; 1737 case DP_TRAINING_PATTERN_2: 1738 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1739 break; 1740 case DP_TRAINING_PATTERN_3: 1741 DRM_ERROR("DP training pattern 3 not supported\n"); 1742 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 1743 break; 1744 } 1745 } 1746 1747 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1748 POSTING_READ(intel_dp->output_reg); 1749 1750 intel_dp_aux_native_write_1(intel_dp, 1751 DP_TRAINING_PATTERN_SET, 1752 dp_train_pat); 1753 1754 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != 1755 DP_TRAINING_PATTERN_DISABLE) { 1756 ret = intel_dp_aux_native_write(intel_dp, 1757 DP_TRAINING_LANE0_SET, 1758 intel_dp->train_set, 1759 intel_dp->lane_count); 1760 if (ret != intel_dp->lane_count) 1761 return false; 1762 } 1763 1764 return true; 1765} 1766 1767/* Enable corresponding port and start training pattern 1 */ 1768void 1769intel_dp_start_link_train(struct intel_dp *intel_dp) 1770{ 1771 struct drm_encoder *encoder = &intel_dp->base.base; 1772 struct drm_device *dev = encoder->dev; 1773 int i; 1774 uint8_t voltage; 1775 bool clock_recovery = false; 1776 int voltage_tries, loop_tries; 1777 uint32_t DP = intel_dp->DP; 1778 1779 if (IS_HASWELL(dev)) 1780 intel_ddi_prepare_link_retrain(encoder); 1781 1782 /* Write the link configuration data */ 1783 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1784 intel_dp->link_configuration, 1785 DP_LINK_CONFIGURATION_SIZE); 1786 1787 DP |= DP_PORT_EN; 1788 1789 memset(intel_dp->train_set, 0, 4); 1790 voltage = 0xff; 1791 voltage_tries = 0; 1792 loop_tries = 0; 1793 clock_recovery = false; 1794 for (;;) { 1795 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1796 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1797 uint32_t signal_levels; 1798 1799 if (IS_HASWELL(dev)) { 1800 signal_levels = intel_dp_signal_levels_hsw( 1801 intel_dp->train_set[0]); 1802 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1803 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1804 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1805 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1806 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1807 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1808 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1809 } else { 1810 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1811 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1812 } 1813 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", 1814 signal_levels); 1815 1816 /* Set training pattern 1 */ 1817 if (!intel_dp_set_link_train(intel_dp, DP, 1818 DP_TRAINING_PATTERN_1 | 1819 DP_LINK_SCRAMBLING_DISABLE)) 1820 break; 1821 1822 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 1823 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1824 DRM_ERROR("failed to get link status\n"); 1825 break; 1826 } 1827 1828 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1829 DRM_DEBUG_KMS("clock recovery OK\n"); 1830 clock_recovery = true; 1831 break; 1832 } 1833 1834 /* Check to see if we've tried the max voltage */ 1835 for (i = 0; i < intel_dp->lane_count; i++) 1836 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1837 break; 1838 if (i == intel_dp->lane_count && voltage_tries == 5) { 1839 if (++loop_tries == 5) { 1840 DRM_DEBUG_KMS("too many full retries, give up\n"); 1841 break; 1842 } 1843 memset(intel_dp->train_set, 0, 4); 1844 voltage_tries = 0; 1845 continue; 1846 } 1847 1848 /* Check to see if we've tried the same voltage 5 times */ 1849 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { 1850 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1851 voltage_tries = 0; 1852 } else 1853 ++voltage_tries; 1854 1855 /* Compute new intel_dp->train_set as requested by target */ 1856 intel_get_adjust_train(intel_dp, link_status); 1857 } 1858 1859 intel_dp->DP = DP; 1860} 1861 1862void 1863intel_dp_complete_link_train(struct intel_dp *intel_dp) 1864{ 1865 struct drm_device *dev = intel_dp->base.base.dev; 1866 bool channel_eq = false; 1867 int tries, cr_tries; 1868 uint32_t DP = intel_dp->DP; 1869 1870 /* channel equalization */ 1871 tries = 0; 1872 cr_tries = 0; 1873 channel_eq = false; 1874 for (;;) { 1875 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1876 uint32_t signal_levels; 1877 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1878 1879 if (cr_tries > 5) { 1880 DRM_ERROR("failed to train DP, aborting\n"); 1881 intel_dp_link_down(intel_dp); 1882 break; 1883 } 1884 1885 if (IS_HASWELL(dev)) { 1886 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); 1887 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1888 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1889 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1890 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1891 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1892 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1893 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1894 } else { 1895 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1896 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1897 } 1898 1899 /* channel eq pattern */ 1900 if (!intel_dp_set_link_train(intel_dp, DP, 1901 DP_TRAINING_PATTERN_2 | 1902 DP_LINK_SCRAMBLING_DISABLE)) 1903 break; 1904 1905 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 1906 if (!intel_dp_get_link_status(intel_dp, link_status)) 1907 break; 1908 1909 /* Make sure clock is still ok */ 1910 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1911 intel_dp_start_link_train(intel_dp); 1912 cr_tries++; 1913 continue; 1914 } 1915 1916 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 1917 channel_eq = true; 1918 break; 1919 } 1920 1921 /* Try 5 times, then try clock recovery if that fails */ 1922 if (tries > 5) { 1923 intel_dp_link_down(intel_dp); 1924 intel_dp_start_link_train(intel_dp); 1925 tries = 0; 1926 cr_tries++; 1927 continue; 1928 } 1929 1930 /* Compute new intel_dp->train_set as requested by target */ 1931 intel_get_adjust_train(intel_dp, link_status); 1932 ++tries; 1933 } 1934 1935 if (channel_eq) 1936 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); 1937 1938 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1939} 1940 1941static void 1942intel_dp_link_down(struct intel_dp *intel_dp) 1943{ 1944 struct drm_device *dev = intel_dp->base.base.dev; 1945 struct drm_i915_private *dev_priv = dev->dev_private; 1946 uint32_t DP = intel_dp->DP; 1947 1948 /* 1949 * DDI code has a strict mode set sequence and we should try to respect 1950 * it, otherwise we might hang the machine in many different ways. So we 1951 * really should be disabling the port only on a complete crtc_disable 1952 * sequence. This function is just called under two conditions on DDI 1953 * code: 1954 * - Link train failed while doing crtc_enable, and on this case we 1955 * really should respect the mode set sequence and wait for a 1956 * crtc_disable. 1957 * - Someone turned the monitor off and intel_dp_check_link_status 1958 * called us. We don't need to disable the whole port on this case, so 1959 * when someone turns the monitor on again, 1960 * intel_ddi_prepare_link_retrain will take care of redoing the link 1961 * train. 1962 */ 1963 if (IS_HASWELL(dev)) 1964 return; 1965 1966 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 1967 return; 1968 1969 DRM_DEBUG_KMS("\n"); 1970 1971 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1972 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1973 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1974 } else { 1975 DP &= ~DP_LINK_TRAIN_MASK; 1976 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1977 } 1978 POSTING_READ(intel_dp->output_reg); 1979 1980 msleep(17); 1981 1982 if (HAS_PCH_IBX(dev) && 1983 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1984 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1985 1986 /* Hardware workaround: leaving our transcoder select 1987 * set to transcoder B while it's off will prevent the 1988 * corresponding HDMI output on transcoder A. 1989 * 1990 * Combine this with another hardware workaround: 1991 * transcoder select bit can only be cleared while the 1992 * port is enabled. 1993 */ 1994 DP &= ~DP_PIPEB_SELECT; 1995 I915_WRITE(intel_dp->output_reg, DP); 1996 1997 /* Changes to enable or select take place the vblank 1998 * after being written. 1999 */ 2000 if (crtc == NULL) { 2001 /* We can arrive here never having been attached 2002 * to a CRTC, for instance, due to inheriting 2003 * random state from the BIOS. 2004 * 2005 * If the pipe is not running, play safe and 2006 * wait for the clocks to stabilise before 2007 * continuing. 2008 */ 2009 POSTING_READ(intel_dp->output_reg); 2010 msleep(50); 2011 } else 2012 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 2013 } 2014 2015 DP &= ~DP_AUDIO_OUTPUT_ENABLE; 2016 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 2017 POSTING_READ(intel_dp->output_reg); 2018 msleep(intel_dp->panel_power_down_delay); 2019} 2020 2021static bool 2022intel_dp_get_dpcd(struct intel_dp *intel_dp) 2023{ 2024 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2025 sizeof(intel_dp->dpcd)) == 0) 2026 return false; /* aux transfer failed */ 2027 2028 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2029 return false; /* DPCD not present */ 2030 2031 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2032 DP_DWN_STRM_PORT_PRESENT)) 2033 return true; /* native DP sink */ 2034 2035 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 2036 return true; /* no per-port downstream info */ 2037 2038 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, 2039 intel_dp->downstream_ports, 2040 DP_MAX_DOWNSTREAM_PORTS) == 0) 2041 return false; /* downstream port status fetch failed */ 2042 2043 return true; 2044} 2045 2046static void 2047intel_dp_probe_oui(struct intel_dp *intel_dp) 2048{ 2049 u8 buf[3]; 2050 2051 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 2052 return; 2053 2054 ironlake_edp_panel_vdd_on(intel_dp); 2055 2056 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 2057 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 2058 buf[0], buf[1], buf[2]); 2059 2060 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) 2061 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 2062 buf[0], buf[1], buf[2]); 2063 2064 ironlake_edp_panel_vdd_off(intel_dp, false); 2065} 2066 2067static bool 2068intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) 2069{ 2070 int ret; 2071 2072 ret = intel_dp_aux_native_read_retry(intel_dp, 2073 DP_DEVICE_SERVICE_IRQ_VECTOR, 2074 sink_irq_vector, 1); 2075 if (!ret) 2076 return false; 2077 2078 return true; 2079} 2080 2081static void 2082intel_dp_handle_test_request(struct intel_dp *intel_dp) 2083{ 2084 /* NAK by default */ 2085 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); 2086} 2087 2088/* 2089 * According to DP spec 2090 * 5.1.2: 2091 * 1. Read DPCD 2092 * 2. Configure link according to Receiver Capabilities 2093 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 2094 * 4. Check link status on receipt of hot-plug interrupt 2095 */ 2096 2097static void 2098intel_dp_check_link_status(struct intel_dp *intel_dp) 2099{ 2100 u8 sink_irq_vector; 2101 u8 link_status[DP_LINK_STATUS_SIZE]; 2102 2103 if (!intel_dp->base.connectors_active) 2104 return; 2105 2106 if (WARN_ON(!intel_dp->base.base.crtc)) 2107 return; 2108 2109 /* Try to read receiver status if the link appears to be up */ 2110 if (!intel_dp_get_link_status(intel_dp, link_status)) { 2111 intel_dp_link_down(intel_dp); 2112 return; 2113 } 2114 2115 /* Now read the DPCD to see if it's actually running */ 2116 if (!intel_dp_get_dpcd(intel_dp)) { 2117 intel_dp_link_down(intel_dp); 2118 return; 2119 } 2120 2121 /* Try to read the source of the interrupt */ 2122 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 2123 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { 2124 /* Clear interrupt source */ 2125 intel_dp_aux_native_write_1(intel_dp, 2126 DP_DEVICE_SERVICE_IRQ_VECTOR, 2127 sink_irq_vector); 2128 2129 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) 2130 intel_dp_handle_test_request(intel_dp); 2131 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) 2132 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 2133 } 2134 2135 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 2136 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 2137 drm_get_encoder_name(&intel_dp->base.base)); 2138 intel_dp_start_link_train(intel_dp); 2139 intel_dp_complete_link_train(intel_dp); 2140 } 2141} 2142 2143/* XXX this is probably wrong for multiple downstream ports */ 2144static enum drm_connector_status 2145intel_dp_detect_dpcd(struct intel_dp *intel_dp) 2146{ 2147 uint8_t *dpcd = intel_dp->dpcd; 2148 bool hpd; 2149 uint8_t type; 2150 2151 if (!intel_dp_get_dpcd(intel_dp)) 2152 return connector_status_disconnected; 2153 2154 /* if there's no downstream port, we're done */ 2155 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) 2156 return connector_status_connected; 2157 2158 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 2159 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); 2160 if (hpd) { 2161 uint8_t reg; 2162 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2163 ®, 1)) 2164 return connector_status_unknown; 2165 return DP_GET_SINK_COUNT(reg) ? connector_status_connected 2166 : connector_status_disconnected; 2167 } 2168 2169 /* If no HPD, poke DDC gently */ 2170 if (drm_probe_ddc(&intel_dp->adapter)) 2171 return connector_status_connected; 2172 2173 /* Well we tried, say unknown for unreliable port types */ 2174 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2175 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2176 return connector_status_unknown; 2177 2178 /* Anything else is out of spec, warn and ignore */ 2179 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2180 return connector_status_disconnected; 2181} 2182 2183static enum drm_connector_status 2184ironlake_dp_detect(struct intel_dp *intel_dp) 2185{ 2186 enum drm_connector_status status; 2187 2188 /* Can't disconnect eDP, but you can close the lid... */ 2189 if (is_edp(intel_dp)) { 2190 status = intel_panel_detect(intel_dp->base.base.dev); 2191 if (status == connector_status_unknown) 2192 status = connector_status_connected; 2193 return status; 2194 } 2195 2196 return intel_dp_detect_dpcd(intel_dp); 2197} 2198 2199static enum drm_connector_status 2200g4x_dp_detect(struct intel_dp *intel_dp) 2201{ 2202 struct drm_device *dev = intel_dp->base.base.dev; 2203 struct drm_i915_private *dev_priv = dev->dev_private; 2204 uint32_t bit; 2205 2206 switch (intel_dp->output_reg) { 2207 case DP_B: 2208 bit = DPB_HOTPLUG_LIVE_STATUS; 2209 break; 2210 case DP_C: 2211 bit = DPC_HOTPLUG_LIVE_STATUS; 2212 break; 2213 case DP_D: 2214 bit = DPD_HOTPLUG_LIVE_STATUS; 2215 break; 2216 default: 2217 return connector_status_unknown; 2218 } 2219 2220 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 2221 return connector_status_disconnected; 2222 2223 return intel_dp_detect_dpcd(intel_dp); 2224} 2225 2226static struct edid * 2227intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 2228{ 2229 struct intel_connector *intel_connector = to_intel_connector(connector); 2230 2231 /* use cached edid if we have one */ 2232 if (intel_connector->edid) { 2233 struct edid *edid; 2234 int size; 2235 2236 /* invalid edid */ 2237 if (IS_ERR(intel_connector->edid)) 2238 return NULL; 2239 2240 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 2241 edid = kmalloc(size, GFP_KERNEL); 2242 if (!edid) 2243 return NULL; 2244 2245 memcpy(edid, intel_connector->edid, size); 2246 return edid; 2247 } 2248 2249 return drm_get_edid(connector, adapter); 2250} 2251 2252static int 2253intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) 2254{ 2255 struct intel_connector *intel_connector = to_intel_connector(connector); 2256 2257 /* use cached edid if we have one */ 2258 if (intel_connector->edid) { 2259 /* invalid edid */ 2260 if (IS_ERR(intel_connector->edid)) 2261 return 0; 2262 2263 return intel_connector_update_modes(connector, 2264 intel_connector->edid); 2265 } 2266 2267 return intel_ddc_get_modes(connector, adapter); 2268} 2269 2270 2271/** 2272 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. 2273 * 2274 * \return true if DP port is connected. 2275 * \return false if DP port is disconnected. 2276 */ 2277static enum drm_connector_status 2278intel_dp_detect(struct drm_connector *connector, bool force) 2279{ 2280 struct intel_dp *intel_dp = intel_attached_dp(connector); 2281 struct drm_device *dev = connector->dev; 2282 enum drm_connector_status status; 2283 struct edid *edid = NULL; 2284 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2285 2286 intel_dp->has_audio = false; 2287 2288 if (HAS_PCH_SPLIT(dev)) 2289 status = ironlake_dp_detect(intel_dp); 2290 else 2291 status = g4x_dp_detect(intel_dp); 2292 2293 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), 2294 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); 2295 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); 2296 2297 if (status != connector_status_connected) 2298 return status; 2299 2300 intel_dp_probe_oui(intel_dp); 2301 2302 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 2303 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); 2304 } else { 2305 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2306 if (edid) { 2307 intel_dp->has_audio = drm_detect_monitor_audio(edid); 2308 kfree(edid); 2309 } 2310 } 2311 2312 return connector_status_connected; 2313} 2314 2315static int intel_dp_get_modes(struct drm_connector *connector) 2316{ 2317 struct intel_dp *intel_dp = intel_attached_dp(connector); 2318 struct intel_connector *intel_connector = to_intel_connector(connector); 2319 struct drm_device *dev = connector->dev; 2320 int ret; 2321 2322 /* We should parse the EDID data and find out if it has an audio sink 2323 */ 2324 2325 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); 2326 if (ret) 2327 return ret; 2328 2329 /* if eDP has no EDID, fall back to fixed mode */ 2330 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2331 struct drm_display_mode *mode; 2332 mode = drm_mode_duplicate(dev, 2333 intel_connector->panel.fixed_mode); 2334 if (mode) { 2335 drm_mode_probed_add(connector, mode); 2336 return 1; 2337 } 2338 } 2339 return 0; 2340} 2341 2342static bool 2343intel_dp_detect_audio(struct drm_connector *connector) 2344{ 2345 struct intel_dp *intel_dp = intel_attached_dp(connector); 2346 struct edid *edid; 2347 bool has_audio = false; 2348 2349 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2350 if (edid) { 2351 has_audio = drm_detect_monitor_audio(edid); 2352 kfree(edid); 2353 } 2354 2355 return has_audio; 2356} 2357 2358static int 2359intel_dp_set_property(struct drm_connector *connector, 2360 struct drm_property *property, 2361 uint64_t val) 2362{ 2363 struct drm_i915_private *dev_priv = connector->dev->dev_private; 2364 struct intel_connector *intel_connector = to_intel_connector(connector); 2365 struct intel_dp *intel_dp = intel_attached_dp(connector); 2366 int ret; 2367 2368 ret = drm_connector_property_set_value(connector, property, val); 2369 if (ret) 2370 return ret; 2371 2372 if (property == dev_priv->force_audio_property) { 2373 int i = val; 2374 bool has_audio; 2375 2376 if (i == intel_dp->force_audio) 2377 return 0; 2378 2379 intel_dp->force_audio = i; 2380 2381 if (i == HDMI_AUDIO_AUTO) 2382 has_audio = intel_dp_detect_audio(connector); 2383 else 2384 has_audio = (i == HDMI_AUDIO_ON); 2385 2386 if (has_audio == intel_dp->has_audio) 2387 return 0; 2388 2389 intel_dp->has_audio = has_audio; 2390 goto done; 2391 } 2392 2393 if (property == dev_priv->broadcast_rgb_property) { 2394 if (val == !!intel_dp->color_range) 2395 return 0; 2396 2397 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; 2398 goto done; 2399 } 2400 2401 if (is_edp(intel_dp) && 2402 property == connector->dev->mode_config.scaling_mode_property) { 2403 if (val == DRM_MODE_SCALE_NONE) { 2404 DRM_DEBUG_KMS("no scaling not supported\n"); 2405 return -EINVAL; 2406 } 2407 2408 if (intel_connector->panel.fitting_mode == val) { 2409 /* the eDP scaling property is not changed */ 2410 return 0; 2411 } 2412 intel_connector->panel.fitting_mode = val; 2413 2414 goto done; 2415 } 2416 2417 return -EINVAL; 2418 2419done: 2420 if (intel_dp->base.base.crtc) { 2421 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2422 intel_set_mode(crtc, &crtc->mode, 2423 crtc->x, crtc->y, crtc->fb); 2424 } 2425 2426 return 0; 2427} 2428 2429static void 2430intel_dp_destroy(struct drm_connector *connector) 2431{ 2432 struct drm_device *dev = connector->dev; 2433 struct intel_dp *intel_dp = intel_attached_dp(connector); 2434 struct intel_connector *intel_connector = to_intel_connector(connector); 2435 2436 if (!IS_ERR_OR_NULL(intel_connector->edid)) 2437 kfree(intel_connector->edid); 2438 2439 if (is_edp(intel_dp)) { 2440 intel_panel_destroy_backlight(dev); 2441 intel_panel_fini(&intel_connector->panel); 2442 } 2443 2444 drm_sysfs_connector_remove(connector); 2445 drm_connector_cleanup(connector); 2446 kfree(connector); 2447} 2448 2449static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 2450{ 2451 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2452 2453 i2c_del_adapter(&intel_dp->adapter); 2454 drm_encoder_cleanup(encoder); 2455 if (is_edp(intel_dp)) { 2456 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2457 ironlake_panel_vdd_off_sync(intel_dp); 2458 } 2459 kfree(intel_dp); 2460} 2461 2462static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2463 .mode_fixup = intel_dp_mode_fixup, 2464 .mode_set = intel_dp_mode_set, 2465 .disable = intel_encoder_noop, 2466}; 2467 2468static const struct drm_encoder_helper_funcs intel_dp_helper_funcs_hsw = { 2469 .mode_fixup = intel_dp_mode_fixup, 2470 .mode_set = intel_ddi_mode_set, 2471 .disable = intel_encoder_noop, 2472}; 2473 2474static const struct drm_connector_funcs intel_dp_connector_funcs = { 2475 .dpms = intel_connector_dpms, 2476 .detect = intel_dp_detect, 2477 .fill_modes = drm_helper_probe_single_connector_modes, 2478 .set_property = intel_dp_set_property, 2479 .destroy = intel_dp_destroy, 2480}; 2481 2482static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 2483 .get_modes = intel_dp_get_modes, 2484 .mode_valid = intel_dp_mode_valid, 2485 .best_encoder = intel_best_encoder, 2486}; 2487 2488static const struct drm_encoder_funcs intel_dp_enc_funcs = { 2489 .destroy = intel_dp_encoder_destroy, 2490}; 2491 2492static void 2493intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2494{ 2495 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2496 2497 intel_dp_check_link_status(intel_dp); 2498} 2499 2500/* Return which DP Port should be selected for Transcoder DP control */ 2501int 2502intel_trans_dp_port_sel(struct drm_crtc *crtc) 2503{ 2504 struct drm_device *dev = crtc->dev; 2505 struct intel_encoder *intel_encoder; 2506 struct intel_dp *intel_dp; 2507 2508 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 2509 intel_dp = enc_to_intel_dp(&intel_encoder->base); 2510 2511 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2512 intel_encoder->type == INTEL_OUTPUT_EDP) 2513 return intel_dp->output_reg; 2514 } 2515 2516 return -1; 2517} 2518 2519/* check the VBT to see whether the eDP is on DP-D port */ 2520bool intel_dpd_is_edp(struct drm_device *dev) 2521{ 2522 struct drm_i915_private *dev_priv = dev->dev_private; 2523 struct child_device_config *p_child; 2524 int i; 2525 2526 if (!dev_priv->child_dev_num) 2527 return false; 2528 2529 for (i = 0; i < dev_priv->child_dev_num; i++) { 2530 p_child = dev_priv->child_dev + i; 2531 2532 if (p_child->dvo_port == PORT_IDPD && 2533 p_child->device_type == DEVICE_TYPE_eDP) 2534 return true; 2535 } 2536 return false; 2537} 2538 2539static void 2540intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 2541{ 2542 struct intel_connector *intel_connector = to_intel_connector(connector); 2543 2544 intel_attach_force_audio_property(connector); 2545 intel_attach_broadcast_rgb_property(connector); 2546 2547 if (is_edp(intel_dp)) { 2548 drm_mode_create_scaling_mode_property(connector->dev); 2549 drm_connector_attach_property( 2550 connector, 2551 connector->dev->mode_config.scaling_mode_property, 2552 DRM_MODE_SCALE_ASPECT); 2553 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 2554 } 2555} 2556 2557static void 2558intel_dp_init_panel_power_sequencer(struct drm_device *dev, 2559 struct intel_dp *intel_dp) 2560{ 2561 struct drm_i915_private *dev_priv = dev->dev_private; 2562 struct edp_power_seq cur, vbt, spec, final; 2563 u32 pp_on, pp_off, pp_div, pp; 2564 2565 /* Workaround: Need to write PP_CONTROL with the unlock key as 2566 * the very first thing. */ 2567 pp = ironlake_get_pp_control(dev_priv); 2568 I915_WRITE(PCH_PP_CONTROL, pp); 2569 2570 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2571 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2572 pp_div = I915_READ(PCH_PP_DIVISOR); 2573 2574 /* Pull timing values out of registers */ 2575 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 2576 PANEL_POWER_UP_DELAY_SHIFT; 2577 2578 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2579 PANEL_LIGHT_ON_DELAY_SHIFT; 2580 2581 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2582 PANEL_LIGHT_OFF_DELAY_SHIFT; 2583 2584 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 2585 PANEL_POWER_DOWN_DELAY_SHIFT; 2586 2587 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 2588 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 2589 2590 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2591 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 2592 2593 vbt = dev_priv->edp.pps; 2594 2595 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 2596 * our hw here, which are all in 100usec. */ 2597 spec.t1_t3 = 210 * 10; 2598 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 2599 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 2600 spec.t10 = 500 * 10; 2601 /* This one is special and actually in units of 100ms, but zero 2602 * based in the hw (so we need to add 100 ms). But the sw vbt 2603 * table multiplies it with 1000 to make it in units of 100usec, 2604 * too. */ 2605 spec.t11_t12 = (510 + 100) * 10; 2606 2607 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2608 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); 2609 2610 /* Use the max of the register settings and vbt. If both are 2611 * unset, fall back to the spec limits. */ 2612#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ 2613 spec.field : \ 2614 max(cur.field, vbt.field)) 2615 assign_final(t1_t3); 2616 assign_final(t8); 2617 assign_final(t9); 2618 assign_final(t10); 2619 assign_final(t11_t12); 2620#undef assign_final 2621 2622#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) 2623 intel_dp->panel_power_up_delay = get_delay(t1_t3); 2624 intel_dp->backlight_on_delay = get_delay(t8); 2625 intel_dp->backlight_off_delay = get_delay(t9); 2626 intel_dp->panel_power_down_delay = get_delay(t10); 2627 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2628#undef get_delay 2629 2630 /* And finally store the new values in the power sequencer. */ 2631 pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2632 (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2633 pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 2634 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 2635 /* Compute the divisor for the pp clock, simply match the Bspec 2636 * formula. */ 2637 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) 2638 << PP_REFERENCE_DIVIDER_SHIFT; 2639 pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) 2640 << PANEL_POWER_CYCLE_DELAY_SHIFT); 2641 2642 /* Haswell doesn't have any port selection bits for the panel 2643 * power sequencer any more. */ 2644 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 2645 if (is_cpu_edp(intel_dp)) 2646 pp_on |= PANEL_POWER_PORT_DP_A; 2647 else 2648 pp_on |= PANEL_POWER_PORT_DP_D; 2649 } 2650 2651 I915_WRITE(PCH_PP_ON_DELAYS, pp_on); 2652 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); 2653 I915_WRITE(PCH_PP_DIVISOR, pp_div); 2654 2655 2656 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2657 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2658 intel_dp->panel_power_cycle_delay); 2659 2660 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2661 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2662 2663 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 2664 I915_READ(PCH_PP_ON_DELAYS), 2665 I915_READ(PCH_PP_OFF_DELAYS), 2666 I915_READ(PCH_PP_DIVISOR)); 2667} 2668 2669void 2670intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 2671{ 2672 struct drm_i915_private *dev_priv = dev->dev_private; 2673 struct drm_connector *connector; 2674 struct intel_dp *intel_dp; 2675 struct intel_encoder *intel_encoder; 2676 struct intel_connector *intel_connector; 2677 struct drm_display_mode *fixed_mode = NULL; 2678 const char *name = NULL; 2679 int type; 2680 2681 intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); 2682 if (!intel_dp) 2683 return; 2684 2685 intel_dp->output_reg = output_reg; 2686 intel_dp->port = port; 2687 /* Preserve the current hw state. */ 2688 intel_dp->DP = I915_READ(intel_dp->output_reg); 2689 2690 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 2691 if (!intel_connector) { 2692 kfree(intel_dp); 2693 return; 2694 } 2695 intel_encoder = &intel_dp->base; 2696 intel_dp->attached_connector = intel_connector; 2697 2698 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) 2699 if (intel_dpd_is_edp(dev)) 2700 intel_dp->is_pch_edp = true; 2701 2702 /* 2703 * FIXME : We need to initialize built-in panels before external panels. 2704 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup 2705 */ 2706 if (IS_VALLEYVIEW(dev) && output_reg == DP_C) { 2707 type = DRM_MODE_CONNECTOR_eDP; 2708 intel_encoder->type = INTEL_OUTPUT_EDP; 2709 } else if (output_reg == DP_A || is_pch_edp(intel_dp)) { 2710 type = DRM_MODE_CONNECTOR_eDP; 2711 intel_encoder->type = INTEL_OUTPUT_EDP; 2712 } else { 2713 type = DRM_MODE_CONNECTOR_DisplayPort; 2714 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2715 } 2716 2717 connector = &intel_connector->base; 2718 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2719 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2720 2721 connector->polled = DRM_CONNECTOR_POLL_HPD; 2722 2723 intel_encoder->cloneable = false; 2724 2725 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 2726 ironlake_panel_vdd_work); 2727 2728 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2729 2730 connector->interlace_allowed = true; 2731 connector->doublescan_allowed = 0; 2732 2733 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 2734 DRM_MODE_ENCODER_TMDS); 2735 2736 if (IS_HASWELL(dev)) 2737 drm_encoder_helper_add(&intel_encoder->base, 2738 &intel_dp_helper_funcs_hsw); 2739 else 2740 drm_encoder_helper_add(&intel_encoder->base, 2741 &intel_dp_helper_funcs); 2742 2743 intel_connector_attach_encoder(intel_connector, intel_encoder); 2744 drm_sysfs_connector_add(connector); 2745 2746 if (IS_HASWELL(dev)) { 2747 intel_encoder->enable = intel_enable_ddi; 2748 intel_encoder->pre_enable = intel_ddi_pre_enable; 2749 intel_encoder->disable = intel_disable_ddi; 2750 intel_encoder->post_disable = intel_ddi_post_disable; 2751 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 2752 } else { 2753 intel_encoder->enable = intel_enable_dp; 2754 intel_encoder->pre_enable = intel_pre_enable_dp; 2755 intel_encoder->disable = intel_disable_dp; 2756 intel_encoder->post_disable = intel_post_disable_dp; 2757 intel_encoder->get_hw_state = intel_dp_get_hw_state; 2758 } 2759 intel_connector->get_hw_state = intel_connector_get_hw_state; 2760 2761 /* Set up the DDC bus. */ 2762 switch (port) { 2763 case PORT_A: 2764 name = "DPDDC-A"; 2765 break; 2766 case PORT_B: 2767 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; 2768 name = "DPDDC-B"; 2769 break; 2770 case PORT_C: 2771 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; 2772 name = "DPDDC-C"; 2773 break; 2774 case PORT_D: 2775 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; 2776 name = "DPDDC-D"; 2777 break; 2778 default: 2779 WARN(1, "Invalid port %c\n", port_name(port)); 2780 break; 2781 } 2782 2783 if (is_edp(intel_dp)) 2784 intel_dp_init_panel_power_sequencer(dev, intel_dp); 2785 2786 intel_dp_i2c_init(intel_dp, intel_connector, name); 2787 2788 /* Cache DPCD and EDID for edp. */ 2789 if (is_edp(intel_dp)) { 2790 bool ret; 2791 struct drm_display_mode *scan; 2792 struct edid *edid; 2793 2794 ironlake_edp_panel_vdd_on(intel_dp); 2795 ret = intel_dp_get_dpcd(intel_dp); 2796 ironlake_edp_panel_vdd_off(intel_dp, false); 2797 2798 if (ret) { 2799 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2800 dev_priv->no_aux_handshake = 2801 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2802 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2803 } else { 2804 /* if this fails, presume the device is a ghost */ 2805 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2806 intel_dp_encoder_destroy(&intel_encoder->base); 2807 intel_dp_destroy(connector); 2808 return; 2809 } 2810 2811 ironlake_edp_panel_vdd_on(intel_dp); 2812 edid = drm_get_edid(connector, &intel_dp->adapter); 2813 if (edid) { 2814 if (drm_add_edid_modes(connector, edid)) { 2815 drm_mode_connector_update_edid_property(connector, edid); 2816 drm_edid_to_eld(connector, edid); 2817 } else { 2818 kfree(edid); 2819 edid = ERR_PTR(-EINVAL); 2820 } 2821 } else { 2822 edid = ERR_PTR(-ENOENT); 2823 } 2824 intel_connector->edid = edid; 2825 2826 /* prefer fixed mode from EDID if available */ 2827 list_for_each_entry(scan, &connector->probed_modes, head) { 2828 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 2829 fixed_mode = drm_mode_duplicate(dev, scan); 2830 break; 2831 } 2832 } 2833 2834 /* fallback to VBT if available for eDP */ 2835 if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { 2836 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 2837 if (fixed_mode) 2838 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 2839 } 2840 2841 ironlake_edp_panel_vdd_off(intel_dp, false); 2842 } 2843 2844 intel_encoder->hot_plug = intel_dp_hot_plug; 2845 2846 if (is_edp(intel_dp)) { 2847 intel_panel_init(&intel_connector->panel, fixed_mode); 2848 intel_panel_setup_backlight(connector); 2849 } 2850 2851 intel_dp_add_properties(intel_dp, connector); 2852 2853 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 2854 * 0xd. Failure to do so will result in spurious interrupts being 2855 * generated on the port when a cable is not attached. 2856 */ 2857 if (IS_G4X(dev) && !IS_GM45(dev)) { 2858 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 2859 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2860 } 2861} 2862