/* intel_dp.c revision 53b41837935a4016852b30a6242a510e6927f9c7 */
1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/i2c.h> 29#include <linux/slab.h> 30#include <linux/export.h> 31#include <drm/drmP.h> 32#include <drm/drm_crtc.h> 33#include <drm/drm_crtc_helper.h> 34#include <drm/drm_edid.h> 35#include "intel_drv.h" 36#include <drm/i915_drm.h> 37#include "i915_drv.h" 38 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40 41/** 42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 43 * @intel_dp: DP struct 44 * 45 * If a CPU or PCH DP output is attached to an eDP panel, this function 46 * will return true, and false otherwise. 
47 */ 48static bool is_edp(struct intel_dp *intel_dp) 49{ 50 return intel_dp->base.type == INTEL_OUTPUT_EDP; 51} 52 53/** 54 * is_pch_edp - is the port on the PCH and attached to an eDP panel? 55 * @intel_dp: DP struct 56 * 57 * Returns true if the given DP struct corresponds to a PCH DP port attached 58 * to an eDP panel, false otherwise. Helpful for determining whether we 59 * may need FDI resources for a given DP output or not. 60 */ 61static bool is_pch_edp(struct intel_dp *intel_dp) 62{ 63 return intel_dp->is_pch_edp; 64} 65 66/** 67 * is_cpu_edp - is the port on the CPU and attached to an eDP panel? 68 * @intel_dp: DP struct 69 * 70 * Returns true if the given DP struct corresponds to a CPU eDP port. 71 */ 72static bool is_cpu_edp(struct intel_dp *intel_dp) 73{ 74 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 75} 76 77static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 78{ 79 return container_of(intel_attached_encoder(connector), 80 struct intel_dp, base); 81} 82 83/** 84 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? 85 * @encoder: DRM encoder 86 * 87 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed 88 * by intel_display.c. 
89 */ 90bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) 91{ 92 struct intel_dp *intel_dp; 93 94 if (!encoder) 95 return false; 96 97 intel_dp = enc_to_intel_dp(encoder); 98 99 return is_pch_edp(intel_dp); 100} 101 102static void intel_dp_link_down(struct intel_dp *intel_dp); 103 104void 105intel_edp_link_config(struct intel_encoder *intel_encoder, 106 int *lane_num, int *link_bw) 107{ 108 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 109 110 *lane_num = intel_dp->lane_count; 111 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 112} 113 114int 115intel_edp_target_clock(struct intel_encoder *intel_encoder, 116 struct drm_display_mode *mode) 117{ 118 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 119 struct intel_connector *intel_connector = intel_dp->attached_connector; 120 121 if (intel_connector->panel.fixed_mode) 122 return intel_connector->panel.fixed_mode->clock; 123 else 124 return mode->clock; 125} 126 127static int 128intel_dp_max_link_bw(struct intel_dp *intel_dp) 129{ 130 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 131 132 switch (max_link_bw) { 133 case DP_LINK_BW_1_62: 134 case DP_LINK_BW_2_7: 135 break; 136 default: 137 max_link_bw = DP_LINK_BW_1_62; 138 break; 139 } 140 return max_link_bw; 141} 142 143static int 144intel_dp_link_clock(uint8_t link_bw) 145{ 146 if (link_bw == DP_LINK_BW_2_7) 147 return 270000; 148 else 149 return 162000; 150} 151 152/* 153 * The units on the numbers in the next two are... bizarre. Examples will 154 * make it clearer; this one parallels an example in the eDP spec. 155 * 156 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 157 * 158 * 270000 * 1 * 8 / 10 == 216000 159 * 160 * The actual data capacity of that configuration is 2.16Gbit/s, so the 161 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 162 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 163 * 119000. 
At 18bpp that's 2142000 kilobits per second. 164 * 165 * Thus the strange-looking division by 10 in intel_dp_link_required, to 166 * get the result in decakilobits instead of kilobits. 167 */ 168 169static int 170intel_dp_link_required(int pixel_clock, int bpp) 171{ 172 return (pixel_clock * bpp + 9) / 10; 173} 174 175static int 176intel_dp_max_data_rate(int max_link_clock, int max_lanes) 177{ 178 return (max_link_clock * max_lanes * 8) / 10; 179} 180 181static bool 182intel_dp_adjust_dithering(struct intel_dp *intel_dp, 183 struct drm_display_mode *mode, 184 bool adjust_mode) 185{ 186 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 187 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 188 int max_rate, mode_rate; 189 190 mode_rate = intel_dp_link_required(mode->clock, 24); 191 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 192 193 if (mode_rate > max_rate) { 194 mode_rate = intel_dp_link_required(mode->clock, 18); 195 if (mode_rate > max_rate) 196 return false; 197 198 if (adjust_mode) 199 mode->private_flags 200 |= INTEL_MODE_DP_FORCE_6BPC; 201 202 return true; 203 } 204 205 return true; 206} 207 208static int 209intel_dp_mode_valid(struct drm_connector *connector, 210 struct drm_display_mode *mode) 211{ 212 struct intel_dp *intel_dp = intel_attached_dp(connector); 213 struct intel_connector *intel_connector = to_intel_connector(connector); 214 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 215 216 if (is_edp(intel_dp) && fixed_mode) { 217 if (mode->hdisplay > fixed_mode->hdisplay) 218 return MODE_PANEL; 219 220 if (mode->vdisplay > fixed_mode->vdisplay) 221 return MODE_PANEL; 222 } 223 224 if (!intel_dp_adjust_dithering(intel_dp, mode, false)) 225 return MODE_CLOCK_HIGH; 226 227 if (mode->clock < 10000) 228 return MODE_CLOCK_LOW; 229 230 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 231 return MODE_H_ILLEGAL; 232 233 return MODE_OK; 234} 235 236static uint32_t 237pack_aux(uint8_t *src, int 
src_bytes) 238{ 239 int i; 240 uint32_t v = 0; 241 242 if (src_bytes > 4) 243 src_bytes = 4; 244 for (i = 0; i < src_bytes; i++) 245 v |= ((uint32_t) src[i]) << ((3-i) * 8); 246 return v; 247} 248 249static void 250unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 251{ 252 int i; 253 if (dst_bytes > 4) 254 dst_bytes = 4; 255 for (i = 0; i < dst_bytes; i++) 256 dst[i] = src >> ((3-i) * 8); 257} 258 259/* hrawclock is 1/4 the FSB frequency */ 260static int 261intel_hrawclk(struct drm_device *dev) 262{ 263 struct drm_i915_private *dev_priv = dev->dev_private; 264 uint32_t clkcfg; 265 266 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 267 if (IS_VALLEYVIEW(dev)) 268 return 200; 269 270 clkcfg = I915_READ(CLKCFG); 271 switch (clkcfg & CLKCFG_FSB_MASK) { 272 case CLKCFG_FSB_400: 273 return 100; 274 case CLKCFG_FSB_533: 275 return 133; 276 case CLKCFG_FSB_667: 277 return 166; 278 case CLKCFG_FSB_800: 279 return 200; 280 case CLKCFG_FSB_1067: 281 return 266; 282 case CLKCFG_FSB_1333: 283 return 333; 284 /* these two are just a guess; one of them might be right */ 285 case CLKCFG_FSB_1600: 286 case CLKCFG_FSB_1600_ALT: 287 return 400; 288 default: 289 return 133; 290 } 291} 292 293static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 294{ 295 struct drm_device *dev = intel_dp->base.base.dev; 296 struct drm_i915_private *dev_priv = dev->dev_private; 297 298 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; 299} 300 301static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 302{ 303 struct drm_device *dev = intel_dp->base.base.dev; 304 struct drm_i915_private *dev_priv = dev->dev_private; 305 306 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; 307} 308 309static void 310intel_dp_check_edp(struct intel_dp *intel_dp) 311{ 312 struct drm_device *dev = intel_dp->base.base.dev; 313 struct drm_i915_private *dev_priv = dev->dev_private; 314 315 if (!is_edp(intel_dp)) 316 return; 317 if (!ironlake_edp_have_panel_power(intel_dp) 
&& !ironlake_edp_have_panel_vdd(intel_dp)) { 318 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 319 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 320 I915_READ(PCH_PP_STATUS), 321 I915_READ(PCH_PP_CONTROL)); 322 } 323} 324 325static int 326intel_dp_aux_ch(struct intel_dp *intel_dp, 327 uint8_t *send, int send_bytes, 328 uint8_t *recv, int recv_size) 329{ 330 uint32_t output_reg = intel_dp->output_reg; 331 struct drm_device *dev = intel_dp->base.base.dev; 332 struct drm_i915_private *dev_priv = dev->dev_private; 333 uint32_t ch_ctl = output_reg + 0x10; 334 uint32_t ch_data = ch_ctl + 4; 335 int i; 336 int recv_bytes; 337 uint32_t status; 338 uint32_t aux_clock_divider; 339 int try, precharge; 340 341 if (IS_HASWELL(dev)) { 342 switch (intel_dp->port) { 343 case PORT_A: 344 ch_ctl = DPA_AUX_CH_CTL; 345 ch_data = DPA_AUX_CH_DATA1; 346 break; 347 case PORT_B: 348 ch_ctl = PCH_DPB_AUX_CH_CTL; 349 ch_data = PCH_DPB_AUX_CH_DATA1; 350 break; 351 case PORT_C: 352 ch_ctl = PCH_DPC_AUX_CH_CTL; 353 ch_data = PCH_DPC_AUX_CH_DATA1; 354 break; 355 case PORT_D: 356 ch_ctl = PCH_DPD_AUX_CH_CTL; 357 ch_data = PCH_DPD_AUX_CH_DATA1; 358 break; 359 default: 360 BUG(); 361 } 362 } 363 364 intel_dp_check_edp(intel_dp); 365 /* The clock divider is based off the hrawclk, 366 * and would like to run at 2MHz. So, take the 367 * hrawclk value and divide by 2 and use that 368 * 369 * Note that PCH attached eDP panels should use a 125MHz input 370 * clock divider. 
371 */ 372 if (is_cpu_edp(intel_dp)) { 373 if (IS_HASWELL(dev)) 374 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; 375 else if (IS_VALLEYVIEW(dev)) 376 aux_clock_divider = 100; 377 else if (IS_GEN6(dev) || IS_GEN7(dev)) 378 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 379 else 380 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 381 } else if (HAS_PCH_SPLIT(dev)) 382 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 383 else 384 aux_clock_divider = intel_hrawclk(dev) / 2; 385 386 if (IS_GEN6(dev)) 387 precharge = 3; 388 else 389 precharge = 5; 390 391 /* Try to wait for any previous AUX channel activity */ 392 for (try = 0; try < 3; try++) { 393 status = I915_READ(ch_ctl); 394 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 395 break; 396 msleep(1); 397 } 398 399 if (try == 3) { 400 WARN(1, "dp_aux_ch not started status 0x%08x\n", 401 I915_READ(ch_ctl)); 402 return -EBUSY; 403 } 404 405 /* Must try at least 3 times according to DP spec */ 406 for (try = 0; try < 5; try++) { 407 /* Load the send data into the aux channel data registers */ 408 for (i = 0; i < send_bytes; i += 4) 409 I915_WRITE(ch_data + i, 410 pack_aux(send + i, send_bytes - i)); 411 412 /* Send the command and wait for it to complete */ 413 I915_WRITE(ch_ctl, 414 DP_AUX_CH_CTL_SEND_BUSY | 415 DP_AUX_CH_CTL_TIME_OUT_400us | 416 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 417 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 418 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 419 DP_AUX_CH_CTL_DONE | 420 DP_AUX_CH_CTL_TIME_OUT_ERROR | 421 DP_AUX_CH_CTL_RECEIVE_ERROR); 422 for (;;) { 423 status = I915_READ(ch_ctl); 424 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 425 break; 426 udelay(100); 427 } 428 429 /* Clear done status and any errors */ 430 I915_WRITE(ch_ctl, 431 status | 432 DP_AUX_CH_CTL_DONE | 433 DP_AUX_CH_CTL_TIME_OUT_ERROR | 434 DP_AUX_CH_CTL_RECEIVE_ERROR); 435 436 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 437 
DP_AUX_CH_CTL_RECEIVE_ERROR)) 438 continue; 439 if (status & DP_AUX_CH_CTL_DONE) 440 break; 441 } 442 443 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 444 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 445 return -EBUSY; 446 } 447 448 /* Check for timeout or receive error. 449 * Timeouts occur when the sink is not connected 450 */ 451 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 452 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 453 return -EIO; 454 } 455 456 /* Timeouts occur when the device isn't connected, so they're 457 * "normal" -- don't fill the kernel log with these */ 458 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 459 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 460 return -ETIMEDOUT; 461 } 462 463 /* Unload any bytes sent back from the other side */ 464 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 465 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 466 if (recv_bytes > recv_size) 467 recv_bytes = recv_size; 468 469 for (i = 0; i < recv_bytes; i += 4) 470 unpack_aux(I915_READ(ch_data + i), 471 recv + i, recv_bytes - i); 472 473 return recv_bytes; 474} 475 476/* Write data to the aux channel in native mode */ 477static int 478intel_dp_aux_native_write(struct intel_dp *intel_dp, 479 uint16_t address, uint8_t *send, int send_bytes) 480{ 481 int ret; 482 uint8_t msg[20]; 483 int msg_bytes; 484 uint8_t ack; 485 486 intel_dp_check_edp(intel_dp); 487 if (send_bytes > 16) 488 return -1; 489 msg[0] = AUX_NATIVE_WRITE << 4; 490 msg[1] = address >> 8; 491 msg[2] = address & 0xff; 492 msg[3] = send_bytes - 1; 493 memcpy(&msg[4], send, send_bytes); 494 msg_bytes = send_bytes + 4; 495 for (;;) { 496 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 497 if (ret < 0) 498 return ret; 499 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 500 break; 501 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 502 udelay(100); 503 else 504 return -EIO; 505 } 506 return send_bytes; 507} 508 509/* Write a single 
byte to the aux channel in native mode */ 510static int 511intel_dp_aux_native_write_1(struct intel_dp *intel_dp, 512 uint16_t address, uint8_t byte) 513{ 514 return intel_dp_aux_native_write(intel_dp, address, &byte, 1); 515} 516 517/* read bytes from a native aux channel */ 518static int 519intel_dp_aux_native_read(struct intel_dp *intel_dp, 520 uint16_t address, uint8_t *recv, int recv_bytes) 521{ 522 uint8_t msg[4]; 523 int msg_bytes; 524 uint8_t reply[20]; 525 int reply_bytes; 526 uint8_t ack; 527 int ret; 528 529 intel_dp_check_edp(intel_dp); 530 msg[0] = AUX_NATIVE_READ << 4; 531 msg[1] = address >> 8; 532 msg[2] = address & 0xff; 533 msg[3] = recv_bytes - 1; 534 535 msg_bytes = 4; 536 reply_bytes = recv_bytes + 1; 537 538 for (;;) { 539 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 540 reply, reply_bytes); 541 if (ret == 0) 542 return -EPROTO; 543 if (ret < 0) 544 return ret; 545 ack = reply[0]; 546 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { 547 memcpy(recv, reply + 1, ret - 1); 548 return ret - 1; 549 } 550 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 551 udelay(100); 552 else 553 return -EIO; 554 } 555} 556 557static int 558intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 559 uint8_t write_byte, uint8_t *read_byte) 560{ 561 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 562 struct intel_dp *intel_dp = container_of(adapter, 563 struct intel_dp, 564 adapter); 565 uint16_t address = algo_data->address; 566 uint8_t msg[5]; 567 uint8_t reply[2]; 568 unsigned retry; 569 int msg_bytes; 570 int reply_bytes; 571 int ret; 572 573 intel_dp_check_edp(intel_dp); 574 /* Set up the command byte */ 575 if (mode & MODE_I2C_READ) 576 msg[0] = AUX_I2C_READ << 4; 577 else 578 msg[0] = AUX_I2C_WRITE << 4; 579 580 if (!(mode & MODE_I2C_STOP)) 581 msg[0] |= AUX_I2C_MOT << 4; 582 583 msg[1] = address >> 8; 584 msg[2] = address; 585 586 switch (mode) { 587 case MODE_I2C_WRITE: 588 msg[3] = 0; 589 msg[4] = 
write_byte; 590 msg_bytes = 5; 591 reply_bytes = 1; 592 break; 593 case MODE_I2C_READ: 594 msg[3] = 0; 595 msg_bytes = 4; 596 reply_bytes = 2; 597 break; 598 default: 599 msg_bytes = 3; 600 reply_bytes = 1; 601 break; 602 } 603 604 for (retry = 0; retry < 5; retry++) { 605 ret = intel_dp_aux_ch(intel_dp, 606 msg, msg_bytes, 607 reply, reply_bytes); 608 if (ret < 0) { 609 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 610 return ret; 611 } 612 613 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 614 case AUX_NATIVE_REPLY_ACK: 615 /* I2C-over-AUX Reply field is only valid 616 * when paired with AUX ACK. 617 */ 618 break; 619 case AUX_NATIVE_REPLY_NACK: 620 DRM_DEBUG_KMS("aux_ch native nack\n"); 621 return -EREMOTEIO; 622 case AUX_NATIVE_REPLY_DEFER: 623 udelay(100); 624 continue; 625 default: 626 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 627 reply[0]); 628 return -EREMOTEIO; 629 } 630 631 switch (reply[0] & AUX_I2C_REPLY_MASK) { 632 case AUX_I2C_REPLY_ACK: 633 if (mode == MODE_I2C_READ) { 634 *read_byte = reply[1]; 635 } 636 return reply_bytes - 1; 637 case AUX_I2C_REPLY_NACK: 638 DRM_DEBUG_KMS("aux_i2c nack\n"); 639 return -EREMOTEIO; 640 case AUX_I2C_REPLY_DEFER: 641 DRM_DEBUG_KMS("aux_i2c defer\n"); 642 udelay(100); 643 break; 644 default: 645 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 646 return -EREMOTEIO; 647 } 648 } 649 650 DRM_ERROR("too many retries, giving up\n"); 651 return -EREMOTEIO; 652} 653 654static int 655intel_dp_i2c_init(struct intel_dp *intel_dp, 656 struct intel_connector *intel_connector, const char *name) 657{ 658 int ret; 659 660 DRM_DEBUG_KMS("i2c_init %s\n", name); 661 intel_dp->algo.running = false; 662 intel_dp->algo.address = 0; 663 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; 664 665 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); 666 intel_dp->adapter.owner = THIS_MODULE; 667 intel_dp->adapter.class = I2C_CLASS_DDC; 668 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 669 
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 670 intel_dp->adapter.algo_data = &intel_dp->algo; 671 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 672 673 ironlake_edp_panel_vdd_on(intel_dp); 674 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 675 ironlake_edp_panel_vdd_off(intel_dp, false); 676 return ret; 677} 678 679static bool 680intel_dp_mode_fixup(struct drm_encoder *encoder, 681 const struct drm_display_mode *mode, 682 struct drm_display_mode *adjusted_mode) 683{ 684 struct drm_device *dev = encoder->dev; 685 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 686 struct intel_connector *intel_connector = intel_dp->attached_connector; 687 int lane_count, clock; 688 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 689 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 690 int bpp, mode_rate; 691 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 692 693 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 694 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 695 adjusted_mode); 696 intel_pch_panel_fitting(dev, 697 intel_connector->panel.fitting_mode, 698 mode, adjusted_mode); 699 } 700 701 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 702 return false; 703 704 DRM_DEBUG_KMS("DP link computation with max lane count %i " 705 "max bw %02x pixel clock %iKHz\n", 706 max_lane_count, bws[max_clock], adjusted_mode->clock); 707 708 if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) 709 return false; 710 711 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 
18 : 24; 712 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 713 714 for (clock = 0; clock <= max_clock; clock++) { 715 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 716 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 717 718 if (mode_rate <= link_avail) { 719 intel_dp->link_bw = bws[clock]; 720 intel_dp->lane_count = lane_count; 721 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 722 DRM_DEBUG_KMS("DP link bw %02x lane " 723 "count %d clock %d bpp %d\n", 724 intel_dp->link_bw, intel_dp->lane_count, 725 adjusted_mode->clock, bpp); 726 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 727 mode_rate, link_avail); 728 return true; 729 } 730 } 731 } 732 733 return false; 734} 735 736struct intel_dp_m_n { 737 uint32_t tu; 738 uint32_t gmch_m; 739 uint32_t gmch_n; 740 uint32_t link_m; 741 uint32_t link_n; 742}; 743 744static void 745intel_reduce_ratio(uint32_t *num, uint32_t *den) 746{ 747 while (*num > 0xffffff || *den > 0xffffff) { 748 *num >>= 1; 749 *den >>= 1; 750 } 751} 752 753static void 754intel_dp_compute_m_n(int bpp, 755 int nlanes, 756 int pixel_clock, 757 int link_clock, 758 struct intel_dp_m_n *m_n) 759{ 760 m_n->tu = 64; 761 m_n->gmch_m = (pixel_clock * bpp) >> 3; 762 m_n->gmch_n = link_clock * nlanes; 763 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 764 m_n->link_m = pixel_clock; 765 m_n->link_n = link_clock; 766 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 767} 768 769void 770intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 771 struct drm_display_mode *adjusted_mode) 772{ 773 struct drm_device *dev = crtc->dev; 774 struct intel_encoder *encoder; 775 struct drm_i915_private *dev_priv = dev->dev_private; 776 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 777 int lane_count = 4; 778 struct intel_dp_m_n m_n; 779 int pipe = intel_crtc->pipe; 780 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 781 782 /* 783 * Find the 
lane count in the intel_encoder private 784 */ 785 for_each_encoder_on_crtc(dev, crtc, encoder) { 786 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 787 788 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 789 intel_dp->base.type == INTEL_OUTPUT_EDP) 790 { 791 lane_count = intel_dp->lane_count; 792 break; 793 } 794 } 795 796 /* 797 * Compute the GMCH and Link ratios. The '3' here is 798 * the number of bytes_per_pixel post-LUT, which we always 799 * set up for 8-bits of R/G/B, or 3 bytes total. 800 */ 801 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 802 mode->clock, adjusted_mode->clock, &m_n); 803 804 if (IS_HASWELL(dev)) { 805 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), 806 TU_SIZE(m_n.tu) | m_n.gmch_m); 807 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); 808 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); 809 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); 810 } else if (HAS_PCH_SPLIT(dev)) { 811 I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 812 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); 813 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); 814 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); 815 } else if (IS_VALLEYVIEW(dev)) { 816 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); 817 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); 818 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 819 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); 820 } else { 821 I915_WRITE(PIPE_GMCH_DATA_M(pipe), 822 TU_SIZE(m_n.tu) | m_n.gmch_m); 823 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); 824 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); 825 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); 826 } 827} 828 829void intel_dp_init_link_config(struct intel_dp *intel_dp) 830{ 831 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 832 intel_dp->link_configuration[0] = intel_dp->link_bw; 833 intel_dp->link_configuration[1] = intel_dp->lane_count; 834 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 835 /* 836 * Check for 
DPCD version > 1.1 and enhanced framing support 837 */ 838 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 839 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 840 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 841 } 842} 843 844static void 845intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 846 struct drm_display_mode *adjusted_mode) 847{ 848 struct drm_device *dev = encoder->dev; 849 struct drm_i915_private *dev_priv = dev->dev_private; 850 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 851 struct drm_crtc *crtc = intel_dp->base.base.crtc; 852 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 853 854 /* 855 * There are four kinds of DP registers: 856 * 857 * IBX PCH 858 * SNB CPU 859 * IVB CPU 860 * CPT PCH 861 * 862 * IBX PCH and CPU are the same for almost everything, 863 * except that the CPU DP PLL is configured in this 864 * register 865 * 866 * CPT PCH is quite different, having many bits moved 867 * to the TRANS_DP_CTL register instead. That 868 * configuration happens (oddly) in ironlake_pch_enable 869 */ 870 871 /* Preserve the BIOS-computed detected bit. This is 872 * supposed to be read-only. 
873 */ 874 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 875 876 /* Handle DP bits in common between all three register formats */ 877 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 878 879 switch (intel_dp->lane_count) { 880 case 1: 881 intel_dp->DP |= DP_PORT_WIDTH_1; 882 break; 883 case 2: 884 intel_dp->DP |= DP_PORT_WIDTH_2; 885 break; 886 case 4: 887 intel_dp->DP |= DP_PORT_WIDTH_4; 888 break; 889 } 890 if (intel_dp->has_audio) { 891 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 892 pipe_name(intel_crtc->pipe)); 893 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 894 intel_write_eld(encoder, adjusted_mode); 895 } 896 897 intel_dp_init_link_config(intel_dp); 898 899 /* Split out the IBX/CPU vs CPT settings */ 900 901 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 902 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 903 intel_dp->DP |= DP_SYNC_HS_HIGH; 904 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 905 intel_dp->DP |= DP_SYNC_VS_HIGH; 906 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 907 908 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 909 intel_dp->DP |= DP_ENHANCED_FRAMING; 910 911 intel_dp->DP |= intel_crtc->pipe << 29; 912 913 /* don't miss out required setting for eDP */ 914 if (adjusted_mode->clock < 200000) 915 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 916 else 917 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 918 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 919 intel_dp->DP |= intel_dp->color_range; 920 921 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 922 intel_dp->DP |= DP_SYNC_HS_HIGH; 923 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 924 intel_dp->DP |= DP_SYNC_VS_HIGH; 925 intel_dp->DP |= DP_LINK_TRAIN_OFF; 926 927 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 928 intel_dp->DP |= DP_ENHANCED_FRAMING; 929 930 if (intel_crtc->pipe == 1) 931 intel_dp->DP |= DP_PIPEB_SELECT; 932 933 if (is_cpu_edp(intel_dp)) { 934 /* don't miss out required setting for eDP */ 935 if 
(adjusted_mode->clock < 200000) 936 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 937 else 938 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 939 } 940 } else { 941 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 942 } 943} 944 945#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 946#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 947 948#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 949#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 950 951#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 952#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 953 954static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 955 u32 mask, 956 u32 value) 957{ 958 struct drm_device *dev = intel_dp->base.base.dev; 959 struct drm_i915_private *dev_priv = dev->dev_private; 960 961 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 962 mask, value, 963 I915_READ(PCH_PP_STATUS), 964 I915_READ(PCH_PP_CONTROL)); 965 966 if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) { 967 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 968 I915_READ(PCH_PP_STATUS), 969 I915_READ(PCH_PP_CONTROL)); 970 } 971} 972 973static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 974{ 975 DRM_DEBUG_KMS("Wait for panel power on\n"); 976 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 977} 978 979static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 980{ 981 DRM_DEBUG_KMS("Wait for panel power off time\n"); 982 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 983} 984 985static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 986{ 987 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 988 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 989} 990 991 992/* Read the current pp_control 
value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	/* Unlock the write-protected panel power registers so subsequent
	 * PCH_PP_CONTROL writes take effect. */
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Force the panel VDD rail on so the AUX channel and panel registers can be
 * accessed before the panel is fully powered up.  Must be paired with a call
 * to ironlake_edp_panel_vdd_off().
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	/* Nested VDD requests are a caller bug; the bookkeeping is a single
	 * bool, not a refcount. */
	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Honour the panel's mandated power-cycle delay before re-powering. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/* Actually drop the forced-VDD bit, but only once no one wants VDD anymore.
 * Called both synchronously and from the delayed worker. */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Delayed-work callback: drop VDD some time after the last user released it.
 * Takes the mode_config mutex to serialize against modeset paths. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp->base.base.dev;

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Release the VDD request taken by ironlake_edp_panel_vdd_on().  With @sync
 * the rail is dropped immediately; otherwise the drop is deferred so that
 * back-to-back AUX sequences don't needlessly power-cycle VDD.
 */
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/* Turn eDP panel power on, honouring the panel's power-sequencing delays. */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

/* Turn eDP panel power off.  Callers must already hold a VDD reference. */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* VDD was dropped as part of the write above; update the bookkeeping. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

/* Enable the eDP backlight after the panel has had time to settle. */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dp->base.base.crtc)->pipe;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_panel_enable_backlight(dev, pipe);
}

/* Disable the eDP backlight, then wait out the mandated backlight-off delay. */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

/* Enable the CPU eDP PLL.  The pipe must be disabled while doing so. */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

/* Disable the CPU eDP PLL.  The pipe must be disabled while doing so. */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

/* Read back whether the port is enabled and, if so, which pipe drives it. */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* On CPT the pipe association lives in the transcoder's
		 * TRANS_DP_CTL port-select field, not in the port register,
		 * so scan all pipes for our port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}
	}

	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);

	return true;
}

static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	/* NOTE(review): the sink is put into D0 (DPMS_ON) on the disable path
	 * here — presumably so the following AUX-driven panel-off still works.
	 * Confirm against the DP sink power management requirements. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}

/* CPU eDP link/PLL teardown happens here, after the pipe/plane are off. */
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
	}
}

/* Bring the port up: wake the sink, train the link, then power the panel
 * and finally the backlight. */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}

/* The CPU eDP PLL must be running before the pipe is enabled. */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

/* Highest voltage swing this source can drive for the given port type. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

/* Highest pre-emphasis the source supports at @voltage_swing, per platform. */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_HASWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

/* Compute the next train_set from the sink's per-lane adjust requests,
 * clamped to the source's maximum swing and pre-emphasis. */
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	/* Use the highest request across all active lanes. */
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/* Translate DPCD train_set values into (pre-CPT) DP port register bits. */
static uint32_t
intel_dp_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}

/* Program the requested training pattern into the source (DDI, CPT or
 * legacy DP registers) and the sink (DPCD TRAINING_PATTERN_SET), then
 * upload the per-lane drive settings.  Returns false if the lane-set
 * write to the sink fails. */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	uint32_t temp;

	if (IS_HASWELL(dev)) {
		temp = I915_READ(DP_TP_CTL(intel_dp->port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			/* Send idle patterns and wait for their completion
			 * before switching the DDI to normal pixel output. */
			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
			I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

			if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) &
				      DP_TP_STATUS_IDLE_DONE), 1))
				DRM_ERROR("Timed out waiting for DP idle patterns\n");

			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &intel_dp->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (IS_HASWELL(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];
		uint32_t signal_levels;

		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(
							intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}
		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
			      signal_levels);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see
if we've tried the max voltage */ 1834 for (i = 0; i < intel_dp->lane_count; i++) 1835 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1836 break; 1837 if (i == intel_dp->lane_count && voltage_tries == 5) { 1838 if (++loop_tries == 5) { 1839 DRM_DEBUG_KMS("too many full retries, give up\n"); 1840 break; 1841 } 1842 memset(intel_dp->train_set, 0, 4); 1843 voltage_tries = 0; 1844 continue; 1845 } 1846 1847 /* Check to see if we've tried the same voltage 5 times */ 1848 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { 1849 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1850 voltage_tries = 0; 1851 } else 1852 ++voltage_tries; 1853 1854 /* Compute new intel_dp->train_set as requested by target */ 1855 intel_get_adjust_train(intel_dp, link_status); 1856 } 1857 1858 intel_dp->DP = DP; 1859} 1860 1861void 1862intel_dp_complete_link_train(struct intel_dp *intel_dp) 1863{ 1864 struct drm_device *dev = intel_dp->base.base.dev; 1865 bool channel_eq = false; 1866 int tries, cr_tries; 1867 uint32_t DP = intel_dp->DP; 1868 1869 /* channel equalization */ 1870 tries = 0; 1871 cr_tries = 0; 1872 channel_eq = false; 1873 for (;;) { 1874 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1875 uint32_t signal_levels; 1876 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1877 1878 if (cr_tries > 5) { 1879 DRM_ERROR("failed to train DP, aborting\n"); 1880 intel_dp_link_down(intel_dp); 1881 break; 1882 } 1883 1884 if (IS_HASWELL(dev)) { 1885 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); 1886 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1887 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1888 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1889 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1890 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1891 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1892 DP 
= (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1893 } else { 1894 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1895 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1896 } 1897 1898 /* channel eq pattern */ 1899 if (!intel_dp_set_link_train(intel_dp, DP, 1900 DP_TRAINING_PATTERN_2 | 1901 DP_LINK_SCRAMBLING_DISABLE)) 1902 break; 1903 1904 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 1905 if (!intel_dp_get_link_status(intel_dp, link_status)) 1906 break; 1907 1908 /* Make sure clock is still ok */ 1909 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1910 intel_dp_start_link_train(intel_dp); 1911 cr_tries++; 1912 continue; 1913 } 1914 1915 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 1916 channel_eq = true; 1917 break; 1918 } 1919 1920 /* Try 5 times, then try clock recovery if that fails */ 1921 if (tries > 5) { 1922 intel_dp_link_down(intel_dp); 1923 intel_dp_start_link_train(intel_dp); 1924 tries = 0; 1925 cr_tries++; 1926 continue; 1927 } 1928 1929 /* Compute new intel_dp->train_set as requested by target */ 1930 intel_get_adjust_train(intel_dp, link_status); 1931 ++tries; 1932 } 1933 1934 if (channel_eq) 1935 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); 1936 1937 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1938} 1939 1940static void 1941intel_dp_link_down(struct intel_dp *intel_dp) 1942{ 1943 struct drm_device *dev = intel_dp->base.base.dev; 1944 struct drm_i915_private *dev_priv = dev->dev_private; 1945 uint32_t DP = intel_dp->DP; 1946 1947 /* 1948 * DDI code has a strict mode set sequence and we should try to respect 1949 * it, otherwise we might hang the machine in many different ways. So we 1950 * really should be disabling the port only on a complete crtc_disable 1951 * sequence. 
This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (IS_HASWELL(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the link into idle before disabling the port. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	msleep(17);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/* Read the sink's DPCD receiver capabilities into intel_dp->dpcd and, when
 * the sink reports downstream ports with per-port info, the downstream port
 * descriptors too.  Returns false if the sink doesn't answer sensibly. */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/* Log the sink and branch IEEE OUIs, if the sink claims OUI support. */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	/* AUX transactions on eDP need VDD forced up. */
	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/* Fetch the sink's device-service IRQ vector; false on AUX failure. */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

/* Automated-test requests from the sink are not implemented; refuse them. */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->base.connectors_active)
		return;

	if (WARN_ON(!intel_dp->base.base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports
*/ 2143static enum drm_connector_status 2144intel_dp_detect_dpcd(struct intel_dp *intel_dp) 2145{ 2146 uint8_t *dpcd = intel_dp->dpcd; 2147 bool hpd; 2148 uint8_t type; 2149 2150 if (!intel_dp_get_dpcd(intel_dp)) 2151 return connector_status_disconnected; 2152 2153 /* if there's no downstream port, we're done */ 2154 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) 2155 return connector_status_connected; 2156 2157 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 2158 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); 2159 if (hpd) { 2160 uint8_t reg; 2161 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2162 ®, 1)) 2163 return connector_status_unknown; 2164 return DP_GET_SINK_COUNT(reg) ? connector_status_connected 2165 : connector_status_disconnected; 2166 } 2167 2168 /* If no HPD, poke DDC gently */ 2169 if (drm_probe_ddc(&intel_dp->adapter)) 2170 return connector_status_connected; 2171 2172 /* Well we tried, say unknown for unreliable port types */ 2173 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2174 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2175 return connector_status_unknown; 2176 2177 /* Anything else is out of spec, warn and ignore */ 2178 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2179 return connector_status_disconnected; 2180} 2181 2182static enum drm_connector_status 2183ironlake_dp_detect(struct intel_dp *intel_dp) 2184{ 2185 enum drm_connector_status status; 2186 2187 /* Can't disconnect eDP, but you can close the lid... 
*/ 2188 if (is_edp(intel_dp)) { 2189 status = intel_panel_detect(intel_dp->base.base.dev); 2190 if (status == connector_status_unknown) 2191 status = connector_status_connected; 2192 return status; 2193 } 2194 2195 return intel_dp_detect_dpcd(intel_dp); 2196} 2197 2198static enum drm_connector_status 2199g4x_dp_detect(struct intel_dp *intel_dp) 2200{ 2201 struct drm_device *dev = intel_dp->base.base.dev; 2202 struct drm_i915_private *dev_priv = dev->dev_private; 2203 uint32_t bit; 2204 2205 switch (intel_dp->output_reg) { 2206 case DP_B: 2207 bit = DPB_HOTPLUG_LIVE_STATUS; 2208 break; 2209 case DP_C: 2210 bit = DPC_HOTPLUG_LIVE_STATUS; 2211 break; 2212 case DP_D: 2213 bit = DPD_HOTPLUG_LIVE_STATUS; 2214 break; 2215 default: 2216 return connector_status_unknown; 2217 } 2218 2219 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 2220 return connector_status_disconnected; 2221 2222 return intel_dp_detect_dpcd(intel_dp); 2223} 2224 2225static struct edid * 2226intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 2227{ 2228 struct intel_connector *intel_connector = to_intel_connector(connector); 2229 2230 /* use cached edid if we have one */ 2231 if (intel_connector->edid) { 2232 struct edid *edid; 2233 int size; 2234 2235 /* invalid edid */ 2236 if (IS_ERR(intel_connector->edid)) 2237 return NULL; 2238 2239 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 2240 edid = kmalloc(size, GFP_KERNEL); 2241 if (!edid) 2242 return NULL; 2243 2244 memcpy(edid, intel_connector->edid, size); 2245 return edid; 2246 } 2247 2248 return drm_get_edid(connector, adapter); 2249} 2250 2251static int 2252intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) 2253{ 2254 struct intel_connector *intel_connector = to_intel_connector(connector); 2255 2256 /* use cached edid if we have one */ 2257 if (intel_connector->edid) { 2258 /* invalid edid */ 2259 if (IS_ERR(intel_connector->edid)) 2260 return 0; 2261 2262 return 
intel_connector_update_modes(connector,
					    intel_connector->edid);
	}

	return intel_ddc_get_modes(connector, adapter);
}


/**
 * Detect whether a DP/eDP sink is connected, via the platform detect
 * helper (hot-plug live status on G4x, DPCD/panel state on PCH-split),
 * and refresh the cached audio capability from forced state or EDID.
 *
 * \return connector_status_connected if a sink is present,
 * \return connector_status_disconnected/unknown otherwise.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;
	/* 3 chars per DPCD byte: two hex digits plus separator/NUL */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Honour a user-forced audio setting; otherwise ask the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	return connector_status_connected;
}

/* Fill the connector's mode list: EDID modes first, then (for eDP with no
 * EDID modes) the panel's fixed mode.  Returns the number of modes added. */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret)
		return ret;

	/* if
eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

/* Re-probe the EDID and report whether the attached monitor has audio. */
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

/* Connector property setter for force-audio, broadcast-RGB and (eDP only)
 * the panel scaling mode.  Any accepted change that affects output triggers
 * a full modeset on the attached crtc via the 'done' path.  Returns 0 on
 * success, -EINVAL for unknown/invalid property values. */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* no modeset needed if the effective audio state is the same */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_dp->color_range)
			return 0;

		intel_dp->color_range = val ?
DP_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the property change with a full modeset if we're lit up. */
	if (intel_dp->base.base.crtc) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
		intel_set_mode(crtc, &crtc->mode,
			       crtc->x, crtc->y, crtc->fb);
	}

	return 0;
}

/* Connector teardown: free the cached EDID, tear down eDP panel/backlight
 * state, and release the drm connector itself. */
static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* edid may be a real pointer, NULL, or an ERR_PTR marker */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	if (is_edp(intel_dp)) {
		intel_panel_destroy_backlight(dev);
		intel_panel_fini(&intel_connector->panel);
	}

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

/* Encoder teardown: remove the DDC adapter and, for eDP, make sure any
 * pending delayed VDD-off work is flushed before freeing. */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		ironlake_panel_vdd_off_sync(intel_dp);
	}
	kfree(intel_dp);
}

static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
	.disable = intel_encoder_noop,
};

/* Haswell routes mode_set through the DDI code instead. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs_hsw = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_ddi_mode_set,
	.disable = intel_encoder_noop,
};

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

/* Hot-plug callback: re-validate (and if needed retrain) the link. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control.
 * Returns the DP output register of the first DP/eDP encoder on the crtc,
 * or -1 if none is found. */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	return -1;
}

/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	if (!dev_priv->child_dev_num)
		return false;

	/* scan every VBT child device for an eDP entry on port D */
	for (i = 0; i < dev_priv->child_dev_num; i++) {
		p_child = dev_priv->child_dev + i;

		if (p_child->dvo_port == PORT_IDPD &&
		    p_child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}

/* Attach the standard DP connector properties, plus the scaling-mode
 * property (defaulting to full-screen) for eDP panels. */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_connector_attach_property(
			connector,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_FULLSCREEN);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
	}
}

/* Compute and program the eDP panel power sequencer delays, taking the
 * maximum of the current register values and the VBT, falling back to the
 * eDP spec limits when both are unset, and cache the resulting software
 * delays on intel_dp. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(dev_priv);
	I915_WRITE(PCH_PP_CONTROL, pp);

	pp_on = I915_READ(PCH_PP_ON_DELAYS);
	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
	pp_div = I915_READ(PCH_PP_DIVISOR);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* power-cycle delay is stored in 100ms units; scale to 100us */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->edp.pps;

	/* Upper limits from eDP 1.3 spec.
Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert the hw's 100us units to the ms delays used by software. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	/* And finally store the new values in the power sequencer. */
	pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula.
 */
	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
			<< PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (is_cpu_edp(intel_dp))
			pp_on |= PANEL_POWER_PORT_DP_A;
		else
			pp_on |= PANEL_POWER_PORT_DP_D;
	}

	I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
	I915_WRITE(PCH_PP_DIVISOR, pp_div);


	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(PCH_PP_ON_DELAYS),
		      I915_READ(PCH_PP_OFF_DELAYS),
		      I915_READ(PCH_PP_DIVISOR));
}

/* Create and register the DP encoder and connector for the given port:
 * allocates the intel_dp/intel_connector pair, classifies the output as
 * DP vs eDP (via VBT for PCH DP-D), wires up the drm encoder/connector
 * callbacks, DDC bus, panel power sequencer and properties. */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_dp *intel_dp;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	struct drm_display_mode *fixed_mode = NULL;
	const char *name = NULL;
	int type;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
	if (!intel_dp)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->port = port;
	/* Preserve the current hw state.
 */
	intel_dp->DP = I915_READ(intel_dp->output_reg);

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}
	intel_encoder = &intel_dp->base;
	intel_dp->attached_connector = intel_connector;

	/* PCH DP-D may carry an eDP panel; the VBT knows. */
	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && output_reg == DP_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (output_reg == DP_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	}

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_encoder->cloneable = false;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	/* DP can drive any of the three pipes */
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	/* Haswell uses the DDI paths for mode-set and enable/disable. */
	if (IS_HASWELL(dev))
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs_hsw);
	else
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (IS_HASWELL(dev)) {
		intel_encoder->enable = intel_enable_ddi;
		intel_encoder->pre_enable = intel_ddi_pre_enable;
		intel_encoder->disable = intel_disable_ddi;
		intel_encoder->post_disable = intel_ddi_post_disable;
		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
	} else {
		intel_encoder->enable = intel_enable_dp;
		intel_encoder->pre_enable = intel_pre_enable_dp;
		intel_encoder->disable = intel_disable_dp;
		intel_encoder->post_disable = intel_post_disable_dp;
		intel_encoder->get_hw_state = intel_dp_get_hw_state;
	}
	intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	/* Must run before any AUX-over-VDD traffic for eDP panels. */
	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp);

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache DPCD and EDID for edp.
	 */
	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Fetch the panel's EDID once and cache it (or an ERR_PTR
		 * marker) on the connector for later detect/get_modes. */
		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}