/* intel_dp.c — revision 4b6ed685e4cfe850250d2681025df44e5e05ad6c */
1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/i2c.h> 29#include <linux/slab.h> 30#include <linux/export.h> 31#include <drm/drmP.h> 32#include <drm/drm_crtc.h> 33#include <drm/drm_crtc_helper.h> 34#include <drm/drm_edid.h> 35#include "intel_drv.h" 36#include <drm/i915_drm.h> 37#include "i915_drv.h" 38 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40 41struct dp_link_dpll { 42 int link_bw; 43 struct dpll dpll; 44}; 45 46static const struct dp_link_dpll gen4_dpll[] = { 47 { DP_LINK_BW_1_62, 48 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, 49 { DP_LINK_BW_2_7, 50 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } 51}; 52 53static const struct dp_link_dpll pch_dpll[] = { 54 { DP_LINK_BW_1_62, 55 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, 56 { DP_LINK_BW_2_7, 57 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } 58}; 59 60static const struct dp_link_dpll vlv_dpll[] = { 61 { DP_LINK_BW_1_62, 62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, 63 { DP_LINK_BW_2_7, 64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 65}; 66 67/** 68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 69 * @intel_dp: DP struct 70 * 71 * If a CPU or PCH DP output is attached to an eDP panel, this function 72 * will return true, and false otherwise. 
73 */ 74static bool is_edp(struct intel_dp *intel_dp) 75{ 76 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 77 78 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 79} 80 81static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) 82{ 83 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 84 85 return intel_dig_port->base.base.dev; 86} 87 88static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 89{ 90 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 91} 92 93static void intel_dp_link_down(struct intel_dp *intel_dp); 94static void edp_panel_vdd_on(struct intel_dp *intel_dp); 95static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 96 97static int 98intel_dp_max_link_bw(struct intel_dp *intel_dp) 99{ 100 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 101 struct drm_device *dev = intel_dp->attached_connector->base.dev; 102 103 switch (max_link_bw) { 104 case DP_LINK_BW_1_62: 105 case DP_LINK_BW_2_7: 106 break; 107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 108 if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && 109 intel_dp->dpcd[DP_DPCD_REV] >= 0x12) 110 max_link_bw = DP_LINK_BW_5_4; 111 else 112 max_link_bw = DP_LINK_BW_2_7; 113 break; 114 default: 115 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", 116 max_link_bw); 117 max_link_bw = DP_LINK_BW_1_62; 118 break; 119 } 120 return max_link_bw; 121} 122 123/* 124 * The units on the numbers in the next two are... bizarre. Examples will 125 * make it clearer; this one parallels an example in the eDP spec. 126 * 127 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 128 * 129 * 270000 * 1 * 8 / 10 == 216000 130 * 131 * The actual data capacity of that configuration is 2.16Gbit/s, so the 132 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 133 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 134 * 119000. 
At 18bpp that's 2142000 kilobits per second. 135 * 136 * Thus the strange-looking division by 10 in intel_dp_link_required, to 137 * get the result in decakilobits instead of kilobits. 138 */ 139 140static int 141intel_dp_link_required(int pixel_clock, int bpp) 142{ 143 return (pixel_clock * bpp + 9) / 10; 144} 145 146static int 147intel_dp_max_data_rate(int max_link_clock, int max_lanes) 148{ 149 return (max_link_clock * max_lanes * 8) / 10; 150} 151 152static enum drm_mode_status 153intel_dp_mode_valid(struct drm_connector *connector, 154 struct drm_display_mode *mode) 155{ 156 struct intel_dp *intel_dp = intel_attached_dp(connector); 157 struct intel_connector *intel_connector = to_intel_connector(connector); 158 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 159 int target_clock = mode->clock; 160 int max_rate, mode_rate, max_lanes, max_link_clock; 161 162 if (is_edp(intel_dp) && fixed_mode) { 163 if (mode->hdisplay > fixed_mode->hdisplay) 164 return MODE_PANEL; 165 166 if (mode->vdisplay > fixed_mode->vdisplay) 167 return MODE_PANEL; 168 169 target_clock = fixed_mode->clock; 170 } 171 172 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 173 max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 174 175 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 176 mode_rate = intel_dp_link_required(target_clock, 18); 177 178 if (mode_rate > max_rate) 179 return MODE_CLOCK_HIGH; 180 181 if (mode->clock < 10000) 182 return MODE_CLOCK_LOW; 183 184 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 185 return MODE_H_ILLEGAL; 186 187 return MODE_OK; 188} 189 190static uint32_t 191pack_aux(uint8_t *src, int src_bytes) 192{ 193 int i; 194 uint32_t v = 0; 195 196 if (src_bytes > 4) 197 src_bytes = 4; 198 for (i = 0; i < src_bytes; i++) 199 v |= ((uint32_t) src[i]) << ((3-i) * 8); 200 return v; 201} 202 203static void 204unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 205{ 206 int i; 207 if (dst_bytes > 4) 208 dst_bytes 
= 4; 209 for (i = 0; i < dst_bytes; i++) 210 dst[i] = src >> ((3-i) * 8); 211} 212 213/* hrawclock is 1/4 the FSB frequency */ 214static int 215intel_hrawclk(struct drm_device *dev) 216{ 217 struct drm_i915_private *dev_priv = dev->dev_private; 218 uint32_t clkcfg; 219 220 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 221 if (IS_VALLEYVIEW(dev)) 222 return 200; 223 224 clkcfg = I915_READ(CLKCFG); 225 switch (clkcfg & CLKCFG_FSB_MASK) { 226 case CLKCFG_FSB_400: 227 return 100; 228 case CLKCFG_FSB_533: 229 return 133; 230 case CLKCFG_FSB_667: 231 return 166; 232 case CLKCFG_FSB_800: 233 return 200; 234 case CLKCFG_FSB_1067: 235 return 266; 236 case CLKCFG_FSB_1333: 237 return 333; 238 /* these two are just a guess; one of them might be right */ 239 case CLKCFG_FSB_1600: 240 case CLKCFG_FSB_1600_ALT: 241 return 400; 242 default: 243 return 133; 244 } 245} 246 247static void 248intel_dp_init_panel_power_sequencer(struct drm_device *dev, 249 struct intel_dp *intel_dp, 250 struct edp_power_seq *out); 251static void 252intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 253 struct intel_dp *intel_dp, 254 struct edp_power_seq *out); 255 256static enum pipe 257vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 258{ 259 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 260 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 261 struct drm_device *dev = intel_dig_port->base.base.dev; 262 struct drm_i915_private *dev_priv = dev->dev_private; 263 enum port port = intel_dig_port->port; 264 enum pipe pipe; 265 266 /* modeset should have pipe */ 267 if (crtc) 268 return to_intel_crtc(crtc)->pipe; 269 270 /* init time, try to find a pipe with this port selected */ 271 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 272 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & 273 PANEL_PORT_SELECT_MASK; 274 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B) 275 return pipe; 276 if (port_sel == 
PANEL_PORT_SELECT_DPC_VLV && port == PORT_C) 277 return pipe; 278 } 279 280 /* shrug */ 281 return PIPE_A; 282} 283 284static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) 285{ 286 struct drm_device *dev = intel_dp_to_dev(intel_dp); 287 288 if (HAS_PCH_SPLIT(dev)) 289 return PCH_PP_CONTROL; 290 else 291 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); 292} 293 294static u32 _pp_stat_reg(struct intel_dp *intel_dp) 295{ 296 struct drm_device *dev = intel_dp_to_dev(intel_dp); 297 298 if (HAS_PCH_SPLIT(dev)) 299 return PCH_PP_STATUS; 300 else 301 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 302} 303 304static bool edp_have_panel_power(struct intel_dp *intel_dp) 305{ 306 struct drm_device *dev = intel_dp_to_dev(intel_dp); 307 struct drm_i915_private *dev_priv = dev->dev_private; 308 309 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 310} 311 312static bool edp_have_panel_vdd(struct intel_dp *intel_dp) 313{ 314 struct drm_device *dev = intel_dp_to_dev(intel_dp); 315 struct drm_i915_private *dev_priv = dev->dev_private; 316 317 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; 318} 319 320static void 321intel_dp_check_edp(struct intel_dp *intel_dp) 322{ 323 struct drm_device *dev = intel_dp_to_dev(intel_dp); 324 struct drm_i915_private *dev_priv = dev->dev_private; 325 326 if (!is_edp(intel_dp)) 327 return; 328 329 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { 330 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 331 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 332 I915_READ(_pp_stat_reg(intel_dp)), 333 I915_READ(_pp_ctrl_reg(intel_dp))); 334 } 335} 336 337static uint32_t 338intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) 339{ 340 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 341 struct drm_device *dev = intel_dig_port->base.base.dev; 342 struct drm_i915_private *dev_priv = dev->dev_private; 343 uint32_t ch_ctl = 
intel_dp->aux_ch_ctl_reg; 344 uint32_t status; 345 bool done; 346 347#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 348 if (has_aux_irq) 349 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 350 msecs_to_jiffies_timeout(10)); 351 else 352 done = wait_for_atomic(C, 10) == 0; 353 if (!done) 354 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 355 has_aux_irq); 356#undef C 357 358 return status; 359} 360 361static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 362{ 363 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 364 struct drm_device *dev = intel_dig_port->base.base.dev; 365 366 /* 367 * The clock divider is based off the hrawclk, and would like to run at 368 * 2MHz. So, take the hrawclk value and divide by 2 and use that 369 */ 370 return index ? 0 : intel_hrawclk(dev) / 2; 371} 372 373static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 374{ 375 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 376 struct drm_device *dev = intel_dig_port->base.base.dev; 377 378 if (index) 379 return 0; 380 381 if (intel_dig_port->port == PORT_A) { 382 if (IS_GEN6(dev) || IS_GEN7(dev)) 383 return 200; /* SNB & IVB eDP input clock at 400Mhz */ 384 else 385 return 225; /* eDP input clock at 450Mhz */ 386 } else { 387 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 388 } 389} 390 391static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 392{ 393 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 394 struct drm_device *dev = intel_dig_port->base.base.dev; 395 struct drm_i915_private *dev_priv = dev->dev_private; 396 397 if (intel_dig_port->port == PORT_A) { 398 if (index) 399 return 0; 400 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); 401 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 402 /* Workaround for non-ULT HSW */ 403 switch (index) { 404 case 0: 
return 63; 405 case 1: return 72; 406 default: return 0; 407 } 408 } else { 409 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 410 } 411} 412 413static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 414{ 415 return index ? 0 : 100; 416} 417 418static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, 419 bool has_aux_irq, 420 int send_bytes, 421 uint32_t aux_clock_divider) 422{ 423 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 424 struct drm_device *dev = intel_dig_port->base.base.dev; 425 uint32_t precharge, timeout; 426 427 if (IS_GEN6(dev)) 428 precharge = 3; 429 else 430 precharge = 5; 431 432 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) 433 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 434 else 435 timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 436 437 return DP_AUX_CH_CTL_SEND_BUSY | 438 DP_AUX_CH_CTL_DONE | 439 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 440 DP_AUX_CH_CTL_TIME_OUT_ERROR | 441 timeout | 442 DP_AUX_CH_CTL_RECEIVE_ERROR | 443 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 444 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 445 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 446} 447 448static int 449intel_dp_aux_ch(struct intel_dp *intel_dp, 450 uint8_t *send, int send_bytes, 451 uint8_t *recv, int recv_size) 452{ 453 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 454 struct drm_device *dev = intel_dig_port->base.base.dev; 455 struct drm_i915_private *dev_priv = dev->dev_private; 456 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 457 uint32_t ch_data = ch_ctl + 4; 458 uint32_t aux_clock_divider; 459 int i, ret, recv_bytes; 460 uint32_t status; 461 int try, clock = 0; 462 bool has_aux_irq = true; 463 464 /* dp aux is extremely sensitive to irq latency, hence request the 465 * lowest possible wakeup latency and so prevent the cpu from going into 466 * deep sleep states. 
467 */ 468 pm_qos_update_request(&dev_priv->pm_qos, 0); 469 470 intel_dp_check_edp(intel_dp); 471 472 intel_aux_display_runtime_get(dev_priv); 473 474 /* Try to wait for any previous AUX channel activity */ 475 for (try = 0; try < 3; try++) { 476 status = I915_READ_NOTRACE(ch_ctl); 477 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 478 break; 479 msleep(1); 480 } 481 482 if (try == 3) { 483 WARN(1, "dp_aux_ch not started status 0x%08x\n", 484 I915_READ(ch_ctl)); 485 ret = -EBUSY; 486 goto out; 487 } 488 489 /* Only 5 data registers! */ 490 if (WARN_ON(send_bytes > 20 || recv_size > 20)) { 491 ret = -E2BIG; 492 goto out; 493 } 494 495 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { 496 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, 497 has_aux_irq, 498 send_bytes, 499 aux_clock_divider); 500 501 /* Must try at least 3 times according to DP spec */ 502 for (try = 0; try < 5; try++) { 503 /* Load the send data into the aux channel data registers */ 504 for (i = 0; i < send_bytes; i += 4) 505 I915_WRITE(ch_data + i, 506 pack_aux(send + i, send_bytes - i)); 507 508 /* Send the command and wait for it to complete */ 509 I915_WRITE(ch_ctl, send_ctl); 510 511 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 512 513 /* Clear done status and any errors */ 514 I915_WRITE(ch_ctl, 515 status | 516 DP_AUX_CH_CTL_DONE | 517 DP_AUX_CH_CTL_TIME_OUT_ERROR | 518 DP_AUX_CH_CTL_RECEIVE_ERROR); 519 520 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 521 DP_AUX_CH_CTL_RECEIVE_ERROR)) 522 continue; 523 if (status & DP_AUX_CH_CTL_DONE) 524 break; 525 } 526 if (status & DP_AUX_CH_CTL_DONE) 527 break; 528 } 529 530 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 531 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 532 ret = -EBUSY; 533 goto out; 534 } 535 536 /* Check for timeout or receive error. 
537 * Timeouts occur when the sink is not connected 538 */ 539 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 540 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 541 ret = -EIO; 542 goto out; 543 } 544 545 /* Timeouts occur when the device isn't connected, so they're 546 * "normal" -- don't fill the kernel log with these */ 547 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 548 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 549 ret = -ETIMEDOUT; 550 goto out; 551 } 552 553 /* Unload any bytes sent back from the other side */ 554 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 555 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 556 if (recv_bytes > recv_size) 557 recv_bytes = recv_size; 558 559 for (i = 0; i < recv_bytes; i += 4) 560 unpack_aux(I915_READ(ch_data + i), 561 recv + i, recv_bytes - i); 562 563 ret = recv_bytes; 564out: 565 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 566 intel_aux_display_runtime_put(dev_priv); 567 568 return ret; 569} 570 571/* Write data to the aux channel in native mode */ 572static int 573intel_dp_aux_native_write(struct intel_dp *intel_dp, 574 uint16_t address, uint8_t *send, int send_bytes) 575{ 576 int ret; 577 uint8_t msg[20]; 578 int msg_bytes; 579 uint8_t ack; 580 581 if (WARN_ON(send_bytes > 16)) 582 return -E2BIG; 583 584 intel_dp_check_edp(intel_dp); 585 msg[0] = DP_AUX_NATIVE_WRITE << 4; 586 msg[1] = address >> 8; 587 msg[2] = address & 0xff; 588 msg[3] = send_bytes - 1; 589 memcpy(&msg[4], send, send_bytes); 590 msg_bytes = send_bytes + 4; 591 for (;;) { 592 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 593 if (ret < 0) 594 return ret; 595 ack >>= 4; 596 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) 597 break; 598 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 599 udelay(100); 600 else 601 return -EIO; 602 } 603 return send_bytes; 604} 605 606/* Write a single byte to the aux channel in native mode */ 607static int 
608intel_dp_aux_native_write_1(struct intel_dp *intel_dp, 609 uint16_t address, uint8_t byte) 610{ 611 return intel_dp_aux_native_write(intel_dp, address, &byte, 1); 612} 613 614/* read bytes from a native aux channel */ 615static int 616intel_dp_aux_native_read(struct intel_dp *intel_dp, 617 uint16_t address, uint8_t *recv, int recv_bytes) 618{ 619 uint8_t msg[4]; 620 int msg_bytes; 621 uint8_t reply[20]; 622 int reply_bytes; 623 uint8_t ack; 624 int ret; 625 626 if (WARN_ON(recv_bytes > 19)) 627 return -E2BIG; 628 629 intel_dp_check_edp(intel_dp); 630 msg[0] = DP_AUX_NATIVE_READ << 4; 631 msg[1] = address >> 8; 632 msg[2] = address & 0xff; 633 msg[3] = recv_bytes - 1; 634 635 msg_bytes = 4; 636 reply_bytes = recv_bytes + 1; 637 638 for (;;) { 639 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 640 reply, reply_bytes); 641 if (ret == 0) 642 return -EPROTO; 643 if (ret < 0) 644 return ret; 645 ack = reply[0] >> 4; 646 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) { 647 memcpy(recv, reply + 1, ret - 1); 648 return ret - 1; 649 } 650 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 651 udelay(100); 652 else 653 return -EIO; 654 } 655} 656 657static int 658intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 659 uint8_t write_byte, uint8_t *read_byte) 660{ 661 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 662 struct intel_dp *intel_dp = container_of(adapter, 663 struct intel_dp, 664 adapter); 665 uint16_t address = algo_data->address; 666 uint8_t msg[5]; 667 uint8_t reply[2]; 668 unsigned retry; 669 int msg_bytes; 670 int reply_bytes; 671 int ret; 672 673 edp_panel_vdd_on(intel_dp); 674 intel_dp_check_edp(intel_dp); 675 /* Set up the command byte */ 676 if (mode & MODE_I2C_READ) 677 msg[0] = DP_AUX_I2C_READ << 4; 678 else 679 msg[0] = DP_AUX_I2C_WRITE << 4; 680 681 if (!(mode & MODE_I2C_STOP)) 682 msg[0] |= DP_AUX_I2C_MOT << 4; 683 684 msg[1] = address >> 8; 685 msg[2] = address; 686 687 switch (mode) { 
688 case MODE_I2C_WRITE: 689 msg[3] = 0; 690 msg[4] = write_byte; 691 msg_bytes = 5; 692 reply_bytes = 1; 693 break; 694 case MODE_I2C_READ: 695 msg[3] = 0; 696 msg_bytes = 4; 697 reply_bytes = 2; 698 break; 699 default: 700 msg_bytes = 3; 701 reply_bytes = 1; 702 break; 703 } 704 705 /* 706 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is 707 * required to retry at least seven times upon receiving AUX_DEFER 708 * before giving up the AUX transaction. 709 */ 710 for (retry = 0; retry < 7; retry++) { 711 ret = intel_dp_aux_ch(intel_dp, 712 msg, msg_bytes, 713 reply, reply_bytes); 714 if (ret < 0) { 715 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 716 goto out; 717 } 718 719 switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { 720 case DP_AUX_NATIVE_REPLY_ACK: 721 /* I2C-over-AUX Reply field is only valid 722 * when paired with AUX ACK. 723 */ 724 break; 725 case DP_AUX_NATIVE_REPLY_NACK: 726 DRM_DEBUG_KMS("aux_ch native nack\n"); 727 ret = -EREMOTEIO; 728 goto out; 729 case DP_AUX_NATIVE_REPLY_DEFER: 730 /* 731 * For now, just give more slack to branch devices. We 732 * could check the DPCD for I2C bit rate capabilities, 733 * and if available, adjust the interval. We could also 734 * be more careful with DP-to-Legacy adapters where a 735 * long legacy cable may force very low I2C bit rates. 
736 */ 737 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 738 DP_DWN_STRM_PORT_PRESENT) 739 usleep_range(500, 600); 740 else 741 usleep_range(300, 400); 742 continue; 743 default: 744 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 745 reply[0]); 746 ret = -EREMOTEIO; 747 goto out; 748 } 749 750 switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { 751 case DP_AUX_I2C_REPLY_ACK: 752 if (mode == MODE_I2C_READ) { 753 *read_byte = reply[1]; 754 } 755 ret = reply_bytes - 1; 756 goto out; 757 case DP_AUX_I2C_REPLY_NACK: 758 DRM_DEBUG_KMS("aux_i2c nack\n"); 759 ret = -EREMOTEIO; 760 goto out; 761 case DP_AUX_I2C_REPLY_DEFER: 762 DRM_DEBUG_KMS("aux_i2c defer\n"); 763 udelay(100); 764 break; 765 default: 766 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 767 ret = -EREMOTEIO; 768 goto out; 769 } 770 } 771 772 DRM_ERROR("too many retries, giving up\n"); 773 ret = -EREMOTEIO; 774 775out: 776 edp_panel_vdd_off(intel_dp, false); 777 return ret; 778} 779 780static int 781intel_dp_i2c_init(struct intel_dp *intel_dp, 782 struct intel_connector *intel_connector, const char *name) 783{ 784 int ret; 785 786 DRM_DEBUG_KMS("i2c_init %s\n", name); 787 intel_dp->algo.running = false; 788 intel_dp->algo.address = 0; 789 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; 790 791 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); 792 intel_dp->adapter.owner = THIS_MODULE; 793 intel_dp->adapter.class = I2C_CLASS_DDC; 794 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 795 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 796 intel_dp->adapter.algo_data = &intel_dp->algo; 797 intel_dp->adapter.dev.parent = intel_connector->base.kdev; 798 799 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 800 return ret; 801} 802 803static void 804intel_dp_set_clock(struct intel_encoder *encoder, 805 struct intel_crtc_config *pipe_config, int link_bw) 806{ 807 struct drm_device *dev = encoder->base.dev; 808 const struct dp_link_dpll *divisor = NULL; 809 
int i, count = 0; 810 811 if (IS_G4X(dev)) { 812 divisor = gen4_dpll; 813 count = ARRAY_SIZE(gen4_dpll); 814 } else if (IS_HASWELL(dev)) { 815 /* Haswell has special-purpose DP DDI clocks. */ 816 } else if (HAS_PCH_SPLIT(dev)) { 817 divisor = pch_dpll; 818 count = ARRAY_SIZE(pch_dpll); 819 } else if (IS_VALLEYVIEW(dev)) { 820 divisor = vlv_dpll; 821 count = ARRAY_SIZE(vlv_dpll); 822 } 823 824 if (divisor && count) { 825 for (i = 0; i < count; i++) { 826 if (link_bw == divisor[i].link_bw) { 827 pipe_config->dpll = divisor[i].dpll; 828 pipe_config->clock_set = true; 829 break; 830 } 831 } 832 } 833} 834 835bool 836intel_dp_compute_config(struct intel_encoder *encoder, 837 struct intel_crtc_config *pipe_config) 838{ 839 struct drm_device *dev = encoder->base.dev; 840 struct drm_i915_private *dev_priv = dev->dev_private; 841 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 842 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 843 enum port port = dp_to_dig_port(intel_dp)->port; 844 struct intel_crtc *intel_crtc = encoder->new_crtc; 845 struct intel_connector *intel_connector = intel_dp->attached_connector; 846 int lane_count, clock; 847 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 848 /* Conveniently, the link BW constants become indices with a shift...*/ 849 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; 850 int bpp, mode_rate; 851 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; 852 int link_avail, link_clock; 853 854 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) 855 pipe_config->has_pch_encoder = true; 856 857 pipe_config->has_dp_encoder = true; 858 859 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 860 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 861 adjusted_mode); 862 if (!HAS_PCH_SPLIT(dev)) 863 intel_gmch_panel_fitting(intel_crtc, pipe_config, 864 intel_connector->panel.fitting_mode); 865 else 866 intel_pch_panel_fitting(intel_crtc, pipe_config, 
867 intel_connector->panel.fitting_mode); 868 } 869 870 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 871 return false; 872 873 DRM_DEBUG_KMS("DP link computation with max lane count %i " 874 "max bw %02x pixel clock %iKHz\n", 875 max_lane_count, bws[max_clock], 876 adjusted_mode->crtc_clock); 877 878 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 879 * bpc in between. */ 880 bpp = pipe_config->pipe_bpp; 881 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && 882 dev_priv->vbt.edp_bpp < bpp) { 883 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 884 dev_priv->vbt.edp_bpp); 885 bpp = dev_priv->vbt.edp_bpp; 886 } 887 888 for (; bpp >= 6*3; bpp -= 2*3) { 889 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 890 bpp); 891 892 for (clock = 0; clock <= max_clock; clock++) { 893 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 894 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 895 link_avail = intel_dp_max_data_rate(link_clock, 896 lane_count); 897 898 if (mode_rate <= link_avail) { 899 goto found; 900 } 901 } 902 } 903 } 904 905 return false; 906 907found: 908 if (intel_dp->color_range_auto) { 909 /* 910 * See: 911 * CEA-861-E - 5.1 Default Encoding Parameters 912 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 913 */ 914 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) 915 intel_dp->color_range = DP_COLOR_RANGE_16_235; 916 else 917 intel_dp->color_range = 0; 918 } 919 920 if (intel_dp->color_range) 921 pipe_config->limited_color_range = true; 922 923 intel_dp->link_bw = bws[clock]; 924 intel_dp->lane_count = lane_count; 925 pipe_config->pipe_bpp = bpp; 926 pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 927 928 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 929 intel_dp->link_bw, intel_dp->lane_count, 930 pipe_config->port_clock, bpp); 931 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 932 mode_rate, link_avail); 933 934 
intel_link_compute_m_n(bpp, lane_count, 935 adjusted_mode->crtc_clock, 936 pipe_config->port_clock, 937 &pipe_config->dp_m_n); 938 939 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 940 941 return true; 942} 943 944static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) 945{ 946 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 947 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 948 struct drm_device *dev = crtc->base.dev; 949 struct drm_i915_private *dev_priv = dev->dev_private; 950 u32 dpa_ctl; 951 952 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock); 953 dpa_ctl = I915_READ(DP_A); 954 dpa_ctl &= ~DP_PLL_FREQ_MASK; 955 956 if (crtc->config.port_clock == 162000) { 957 /* For a long time we've carried around a ILK-DevA w/a for the 958 * 160MHz clock. If we're really unlucky, it's still required. 959 */ 960 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); 961 dpa_ctl |= DP_PLL_FREQ_160MHZ; 962 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 963 } else { 964 dpa_ctl |= DP_PLL_FREQ_270MHZ; 965 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 966 } 967 968 I915_WRITE(DP_A, dpa_ctl); 969 970 POSTING_READ(DP_A); 971 udelay(500); 972} 973 974static void intel_dp_mode_set(struct intel_encoder *encoder) 975{ 976 struct drm_device *dev = encoder->base.dev; 977 struct drm_i915_private *dev_priv = dev->dev_private; 978 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 979 enum port port = dp_to_dig_port(intel_dp)->port; 980 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 981 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; 982 983 /* 984 * There are four kinds of DP registers: 985 * 986 * IBX PCH 987 * SNB CPU 988 * IVB CPU 989 * CPT PCH 990 * 991 * IBX PCH and CPU are the same for almost everything, 992 * except that the CPU DP PLL is configured in this 993 * register 994 * 995 * CPT PCH is quite different, having many bits moved 996 * to the TRANS_DP_CTL 
register instead. That 997 * configuration happens (oddly) in ironlake_pch_enable 998 */ 999 1000 /* Preserve the BIOS-computed detected bit. This is 1001 * supposed to be read-only. 1002 */ 1003 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 1004 1005 /* Handle DP bits in common between all three register formats */ 1006 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 1007 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count); 1008 1009 if (intel_dp->has_audio) { 1010 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 1011 pipe_name(crtc->pipe)); 1012 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 1013 intel_write_eld(&encoder->base, adjusted_mode); 1014 } 1015 1016 /* Split out the IBX/CPU vs CPT settings */ 1017 1018 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 1019 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1020 intel_dp->DP |= DP_SYNC_HS_HIGH; 1021 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1022 intel_dp->DP |= DP_SYNC_VS_HIGH; 1023 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 1024 1025 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 1026 intel_dp->DP |= DP_ENHANCED_FRAMING; 1027 1028 intel_dp->DP |= crtc->pipe << 29; 1029 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) { 1030 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 1031 intel_dp->DP |= intel_dp->color_range; 1032 1033 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1034 intel_dp->DP |= DP_SYNC_HS_HIGH; 1035 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1036 intel_dp->DP |= DP_SYNC_VS_HIGH; 1037 intel_dp->DP |= DP_LINK_TRAIN_OFF; 1038 1039 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 1040 intel_dp->DP |= DP_ENHANCED_FRAMING; 1041 1042 if (crtc->pipe == 1) 1043 intel_dp->DP |= DP_PIPEB_SELECT; 1044 } else { 1045 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 1046 } 1047 1048 if (port == PORT_A && !IS_VALLEYVIEW(dev)) 1049 ironlake_set_pll_cpu_edp(intel_dp); 1050} 1051 1052#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1053#define IDLE_ON_VALUE 
(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

/*
 * Poll the panel power sequencer status register until the bits selected by
 * @mask read back as @value.  Times out after 5 seconds (polling every
 * 10 ms); a timeout is only logged, not propagated to the caller.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	/* 5000 ms total timeout, 10 ms poll interval. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

/* Block until the sequencer reports the panel fully powered on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

/* Block until the sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

/*
 * Honour the mandatory panel power-cycle delay (measured from the last
 * power-down) before the panel may be powered up again.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

/* Enforce the panel-power-to-backlight-on delay (T8 per eDP timing). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

/* Enforce the backlight-off-to-panel-power-down delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* Replace the lock field with the unlock magic so subsequent writes
	 * to PP_CONTROL take effect. */
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Force panel VDD on so the AUX channel can be used before the panel is
 * fully powered.  Takes a runtime PM reference when VDD was not already
 * forced; the matching put happens in edp_panel_vdd_off_sync().  No-op for
 * non-eDP ports.
 */
static void edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	/* Nested VDD requests are a bug: the bookkeeping is a single flag. */
	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	/* VDD already up (e.g. left on by a previous request or the delayed
	 * off work hasn't run yet): nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return;

	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/*
 * Actually drop the VDD override, but only when nobody wants it anymore
 * (want_panel_vdd cleared) and it is still asserted in hardware.  Releases
 * the runtime PM reference taken in edp_panel_vdd_on().  Must be called
 * with mode_config.mutex held.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

		/* Dropping VDD with panel power off starts a power cycle:
		 * record the timestamp so wait_panel_power_cycle() can honour
		 * the delay. */
		if ((pp & POWER_TARGET_ON) == 0)
			intel_dp->last_power_cycle = jiffies;

		intel_runtime_pm_put(dev_priv);
	}
}

/* Delayed-work callback: drop VDD under the mode_config lock. */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edp_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Release a VDD request.  @sync selects immediate turn-off versus deferring
 * to delayed work (5x the power-cycle delay) so back-to-back AUX users
 * don't thrash VDD.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		edp_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/*
 * Power the eDP panel on via the power sequencer.  No-op for non-eDP or if
 * the panel is already on.  Applies the ILK (gen5) workaround of dropping
 * PANEL_POWER_RESET around the power-up sequence.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}

/*
 * Power the eDP panel off.  Waits out the backlight-off delay first, then
 * clears power, reset and backlight-enable bits in one write.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	edp_wait_backlight_off(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work.
	 */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Power-off starts a power cycle: record the timestamp for the
	 * mandatory cycle delay enforced in wait_panel_power_cycle(). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);
}

/*
 * Enable the eDP backlight: honour the power-on-to-backlight delay, set
 * EDP_BLC_ENABLE in the sequencer, then enable the PWM backlight.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(intel_dp->attached_connector);
}

/*
 * Disable the eDP backlight (PWM first, then the sequencer enable bit) and
 * timestamp the event for the backlight-off-to-power-down delay.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(intel_dp->attached_connector);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	intel_dp->last_backlight_off = jiffies;
}

/*
 * Enable the CPU eDP PLL (DP_A).  The pipe must already be disabled and
 * both the PLL and the port must currently be off.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

/*
 * Disable the CPU eDP PLL.  Operates on the live register value rather
 * than the cached intel_dp->DP (see comment below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail.
	 */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

/*
 * Read back whether this DP port is enabled and, if so, which pipe drives
 * it.  Three register layouts: gen7 CPU eDP (pipe-select per CPT), pre-CPT
 * / port A (pipe-select in the port register), and PCH CPT (pipe found by
 * scanning TRANS_DP_CTL port-select fields).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			/* Unknown register: port is on, pipe unknown.
			 * NOTE(review): *pipe is left unset here — callers
			 * presumably tolerate that; confirm. */
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

/*
 * Fill pipe_config from the hardware state: sync polarity flags, DP m/n
 * values, port clock (for port A from the DP_A PLL frequency field) and
 * the derived dotclock.  Also clamps the VBT eDP bpp to what the BIOS
 * actually programmed (see hack comment below).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		/* Polarity lives in the port register itself. */
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		/* CPT: polarity lives in the transcoder DP control register. */
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}

/* Does the sink advertise PSR support (cached at detect time)? */
static bool is_edp_psr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->psr.sink_support;
}

/* Is PSR currently enabled in the source hardware? */
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev))
		return false;

	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}

/*
 * Write the PSR VSC SDP into the HSW video DIP data registers, padding the
 * remainder of the buffer with zeroes.
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated.
	*/
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	/* Copy the SDP in 32-bit chunks; zero-fill the rest of the DIP
	 * buffer. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}

/*
 * One-time PSR setup: program the VSC SDP and mask the PSR exit triggers
 * that would otherwise cause continuous PSR exits.  Guarded by
 * psr_setup_done so it only runs once per intel_dp.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	intel_dp->psr_setup_done = true;
}

/*
 * Enable PSR on the sink via DPCD, and program the hardware AUX registers
 * used for the automatic PSR-exit DPCD write.  Main-link-active is only
 * requested when the sink needs retraining on exit.
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	int msg_size = 5; /* Header(4) + Message(1) */

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE &
					    ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE |
					    DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION); 1674 I915_WRITE(EDP_PSR_AUX_CTL(dev), 1675 DP_AUX_CH_CTL_TIME_OUT_400us | 1676 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1677 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1678 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); 1679} 1680 1681static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) 1682{ 1683 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1684 struct drm_i915_private *dev_priv = dev->dev_private; 1685 uint32_t max_sleep_time = 0x1f; 1686 uint32_t idle_frames = 1; 1687 uint32_t val = 0x0; 1688 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; 1689 1690 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { 1691 val |= EDP_PSR_LINK_STANDBY; 1692 val |= EDP_PSR_TP2_TP3_TIME_0us; 1693 val |= EDP_PSR_TP1_TIME_0us; 1694 val |= EDP_PSR_SKIP_AUX_EXIT; 1695 } else 1696 val |= EDP_PSR_LINK_DISABLE; 1697 1698 I915_WRITE(EDP_PSR_CTL(dev), val | 1699 IS_BROADWELL(dev) ? 
0 : link_entry_time | 1700 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1701 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1702 EDP_PSR_ENABLE); 1703} 1704 1705static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) 1706{ 1707 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1708 struct drm_device *dev = dig_port->base.base.dev; 1709 struct drm_i915_private *dev_priv = dev->dev_private; 1710 struct drm_crtc *crtc = dig_port->base.base.crtc; 1711 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1712 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; 1713 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1714 1715 dev_priv->psr.source_ok = false; 1716 1717 if (!HAS_PSR(dev)) { 1718 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1719 return false; 1720 } 1721 1722 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1723 (dig_port->port != PORT_A)) { 1724 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1725 return false; 1726 } 1727 1728 if (!i915.enable_psr) { 1729 DRM_DEBUG_KMS("PSR disable by flag\n"); 1730 return false; 1731 } 1732 1733 crtc = dig_port->base.base.crtc; 1734 if (crtc == NULL) { 1735 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1736 return false; 1737 } 1738 1739 intel_crtc = to_intel_crtc(crtc); 1740 if (!intel_crtc_active(crtc)) { 1741 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1742 return false; 1743 } 1744 1745 obj = to_intel_framebuffer(crtc->fb)->obj; 1746 if (obj->tiling_mode != I915_TILING_X || 1747 obj->fence_reg == I915_FENCE_REG_NONE) { 1748 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1749 return false; 1750 } 1751 1752 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { 1753 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); 1754 return false; 1755 } 1756 1757 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1758 S3D_ENABLE) { 1759 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1760 
return false; 1761 } 1762 1763 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 1764 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); 1765 return false; 1766 } 1767 1768 dev_priv->psr.source_ok = true; 1769 return true; 1770} 1771 1772static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) 1773{ 1774 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1775 1776 if (!intel_edp_psr_match_conditions(intel_dp) || 1777 intel_edp_is_psr_enabled(dev)) 1778 return; 1779 1780 /* Setup PSR once */ 1781 intel_edp_psr_setup(intel_dp); 1782 1783 /* Enable PSR on the panel */ 1784 intel_edp_psr_enable_sink(intel_dp); 1785 1786 /* Enable PSR on the host */ 1787 intel_edp_psr_enable_source(intel_dp); 1788} 1789 1790void intel_edp_psr_enable(struct intel_dp *intel_dp) 1791{ 1792 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1793 1794 if (intel_edp_psr_match_conditions(intel_dp) && 1795 !intel_edp_is_psr_enabled(dev)) 1796 intel_edp_psr_do_enable(intel_dp); 1797} 1798 1799void intel_edp_psr_disable(struct intel_dp *intel_dp) 1800{ 1801 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1802 struct drm_i915_private *dev_priv = dev->dev_private; 1803 1804 if (!intel_edp_is_psr_enabled(dev)) 1805 return; 1806 1807 I915_WRITE(EDP_PSR_CTL(dev), 1808 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); 1809 1810 /* Wait till PSR is idle */ 1811 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & 1812 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1813 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 1814} 1815 1816void intel_edp_psr_update(struct drm_device *dev) 1817{ 1818 struct intel_encoder *encoder; 1819 struct intel_dp *intel_dp = NULL; 1820 1821 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) 1822 if (encoder->type == INTEL_OUTPUT_EDP) { 1823 intel_dp = enc_to_intel_dp(&encoder->base); 1824 1825 if (!is_edp_psr(dev)) 1826 return; 1827 1828 if (!intel_edp_psr_match_conditions(intel_dp)) 1829 
intel_edp_psr_disable(intel_dp); 1830 else 1831 if (!intel_edp_is_psr_enabled(dev)) 1832 intel_edp_psr_do_enable(intel_dp); 1833 } 1834} 1835 1836static void intel_disable_dp(struct intel_encoder *encoder) 1837{ 1838 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1839 enum port port = dp_to_dig_port(intel_dp)->port; 1840 struct drm_device *dev = encoder->base.dev; 1841 1842 /* Make sure the panel is off before trying to change the mode. But also 1843 * ensure that we have vdd while we switch off the panel. */ 1844 intel_edp_backlight_off(intel_dp); 1845 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1846 intel_edp_panel_off(intel_dp); 1847 1848 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1849 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) 1850 intel_dp_link_down(intel_dp); 1851} 1852 1853static void intel_post_disable_dp(struct intel_encoder *encoder) 1854{ 1855 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1856 enum port port = dp_to_dig_port(intel_dp)->port; 1857 struct drm_device *dev = encoder->base.dev; 1858 1859 if (port == PORT_A || IS_VALLEYVIEW(dev)) { 1860 intel_dp_link_down(intel_dp); 1861 if (!IS_VALLEYVIEW(dev)) 1862 ironlake_edp_pll_off(intel_dp); 1863 } 1864} 1865 1866static void intel_enable_dp(struct intel_encoder *encoder) 1867{ 1868 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1869 struct drm_device *dev = encoder->base.dev; 1870 struct drm_i915_private *dev_priv = dev->dev_private; 1871 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1872 1873 if (WARN_ON(dp_reg & DP_PORT_EN)) 1874 return; 1875 1876 edp_panel_vdd_on(intel_dp); 1877 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1878 intel_dp_start_link_train(intel_dp); 1879 intel_edp_panel_on(intel_dp); 1880 edp_panel_vdd_off(intel_dp, true); 1881 intel_dp_complete_link_train(intel_dp); 1882 intel_dp_stop_link_train(intel_dp); 1883} 1884 1885static void g4x_enable_dp(struct intel_encoder *encoder) 1886{ 1887 struct 
	       intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}

/* VLV enable hook: the DP port itself is enabled in the pre-enable hook,
 * so only the backlight remains to be turned on here. */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
}

/* Pre-enable for g4x/ilk: CPU eDP (port A) needs its PLL running first. */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	if (dport->port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}

/*
 * VLV pre-enable: program the DPIO PHY lane registers, initialize the
 * panel power sequencer for this pipe/port, enable the port and wait for
 * the PHY to report ready.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	struct edp_power_seq power_seq;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* NOTE(review): the value read here is immediately overwritten by
	 * val = 0 below — the read looks dead; possibly kept for a register
	 * read side effect, confirm before removing. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}

/*
 * VLV pre-PLL-enable: reset the Tx lanes to defaults and apply the
 * inter-pair skew workaround via DPIO writes.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
			 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

/* Maximum voltage swing the source can drive for this platform/port. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

/*
 * Maximum pre-emphasis the source supports at the given voltage swing;
 * per-platform tables (BDW, HSW, VLV, gen7 CPU eDP, everything else).
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_HASWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

/*
 * Program the VLV DPIO PHY with the de-emphasis / pre-emphasis /
 * unique-transcale values matching the requested training level
 * (train_set[0]).  Unsupported swing/pre-emphasis combinations return 0
 * without touching the PHY.  The magic constants are PHY tuning values.
 * Always returns 0: this source encodes the levels purely in DPIO, not in
 * the DP port register.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* Disable the lanes (TX_DW5 = 0), program the levels, re-enable. */
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

/*
 * Compute the next training values from the sink's adjust requests: take
 * the maximum requested voltage/pre-emphasis over all active lanes, clamp
 * each to the source maximum (setting the MAX_*_REACHED flag when
 * clamped), and apply the same value to all four lane entries.
 */
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/* Translate a DPCD training level into gen4 DP port register bits. */
static uint32_t
intel_gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	/* Several swing/pre-emphasis combinations share one HW setting. */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}

/* Broadwell: map train_set onto the DDI buffer translation select value. */
static uint32_t
intel_bdw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_BDW;	/* Sel1 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_BDW;	/* Sel2 */

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_BDW;	/* Sel3 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_BDW;	/* Sel4 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_BDW;	/* Sel5 */

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_BDW;	/* Sel6 */
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_BDW;	/* Sel7 */

	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_1200MV_0DB_BDW;	/* Sel8 */

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	}
}

/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_BROADWELL(dev)) {
		signal_levels = intel_bdw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_HASWELL(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_VALLEYVIEW(dev)) {
		/* VLV programs the PHY directly; nothing to merge into *DP. */
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}

/*
 * Program the requested training pattern on the source (DP_TP_CTL on DDI
 * platforms, otherwise the port register via *DP) and on the sink (DPCD
 * DP_TRAINING_PATTERN_SET, plus DP_TRAINING_LANEx_SET while a pattern is
 * active).  Returns true when the AUX write transferred the expected
 * number of bytes.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |=
DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Not supported by CPT hardware; fall back to TP2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Not supported on this hardware; fall back to TP2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
					buf, len);

	return ret == len;
}

/* Zero the per-lane drive settings and program @dp_train_pat from scratch. */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}

/*
 * Re-program the source drive levels from the sink's adjust requests in
 * @link_status, then mirror the new train_set to the sink's
 * DP_TRAINING_LANE0_SET..LANEx_SET registers over AUX.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
					intel_dp->train_set,
					intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}

/* On DDI platforms, switch DP_TP_CTL to idle-pattern transmission. */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode.
There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}

/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from zeroed settings. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}

/* Channel-equalization phase of link training (TP2, or TP3 if supported). */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			/* Clock recovery was lost: redo phase 1, then retry. */
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n");

}

/* Disable the training pattern on source and sink (normal pixel output). */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}

/* Take the (non-DDI) DP port down: idle pattern, workarounds, port off. */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/*
 * Read the sink's base DPCD receiver capabilities and derive cached flags:
 * PSR support (eDP only), TPS3 support, and downstream-port info.
 * Returns false when the AUX transfer fails or no DPCD is present
 * (revision byte reads as 0).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
					       intel_dp->psr_dpcd,
					       sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	/* Training Pattern 3 support */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported");
	} else
		intel_dp->use_tps3 = false;

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */
if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/* Log the sink and branch OUIs when the sink advertises OUI support. */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	/* Keep panel VDD up across the AUX reads. */
	edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	edp_panel_vdd_off(intel_dp, false);
}

/*
 * Ask the sink to compute a test CRC of the transmitted frame and read 6
 * bytes back into @crc.  Returns 0 on success, -EAGAIN on an AUX failure,
 * -ENOTTY when the sink does not support sink-CRC.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf[1];

	if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
		return -EAGAIN;

	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
					 DP_TEST_SINK_START))
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
		return -EAGAIN;

	intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
	return 0;
}

/* Read the DPCD device-service IRQ vector; false on AUX failure. */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		/* Link dropped out of EQ: run the full retrain sequence. */
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* PCH platforms: digital-port presence check, then DPCD-based probe. */
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/* G4x/VLV: check the hotplug live-status bit, then probe via DPCD. */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (IS_VALLEYVIEW(dev)) {
		switch (intel_dig_port->port) {
		case PORT_B:
			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
			break;
		case PORT_C:
			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
			break;
		case PORT_D:
			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
			break;
		default:
			return connector_status_unknown;
		}
	} else {
		switch (intel_dig_port->port) {
		case PORT_B:
			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
			break;
		case PORT_C:
			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
			break;
		case PORT_D:
			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
			break;
		default:
			return connector_status_unknown;
		}
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/* Return a duplicate of the cached EDID if present, else read over DDC. */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	}

	return drm_get_edid(connector, adapter);
}

/* Add modes from the cached EDID if present, else probe modes over DDC. */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return 0;

		return intel_connector_update_modes(connector,
						    intel_connector->edid);
	}

	return intel_ddc_get_modes(connector, adapter);
}

static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector));

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio ==
HDMI_AUDIO_ON); 3219 } else { 3220 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 3221 if (edid) { 3222 intel_dp->has_audio = drm_detect_monitor_audio(edid); 3223 kfree(edid); 3224 } 3225 } 3226 3227 if (intel_encoder->type != INTEL_OUTPUT_EDP) 3228 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 3229 status = connector_status_connected; 3230 3231out: 3232 intel_runtime_pm_put(dev_priv); 3233 return status; 3234} 3235 3236static int intel_dp_get_modes(struct drm_connector *connector) 3237{ 3238 struct intel_dp *intel_dp = intel_attached_dp(connector); 3239 struct intel_connector *intel_connector = to_intel_connector(connector); 3240 struct drm_device *dev = connector->dev; 3241 int ret; 3242 3243 /* We should parse the EDID data and find out if it has an audio sink 3244 */ 3245 3246 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); 3247 if (ret) 3248 return ret; 3249 3250 /* if eDP has no EDID, fall back to fixed mode */ 3251 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 3252 struct drm_display_mode *mode; 3253 mode = drm_mode_duplicate(dev, 3254 intel_connector->panel.fixed_mode); 3255 if (mode) { 3256 drm_mode_probed_add(connector, mode); 3257 return 1; 3258 } 3259 } 3260 return 0; 3261} 3262 3263static bool 3264intel_dp_detect_audio(struct drm_connector *connector) 3265{ 3266 struct intel_dp *intel_dp = intel_attached_dp(connector); 3267 struct edid *edid; 3268 bool has_audio = false; 3269 3270 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 3271 if (edid) { 3272 has_audio = drm_detect_monitor_audio(edid); 3273 kfree(edid); 3274 } 3275 3276 return has_audio; 3277} 3278 3279static int 3280intel_dp_set_property(struct drm_connector *connector, 3281 struct drm_property *property, 3282 uint64_t val) 3283{ 3284 struct drm_i915_private *dev_priv = connector->dev->dev_private; 3285 struct intel_connector *intel_connector = to_intel_connector(connector); 3286 struct intel_encoder *intel_encoder = 
intel_attached_encoder(connector); 3287 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 3288 int ret; 3289 3290 ret = drm_object_property_set_value(&connector->base, property, val); 3291 if (ret) 3292 return ret; 3293 3294 if (property == dev_priv->force_audio_property) { 3295 int i = val; 3296 bool has_audio; 3297 3298 if (i == intel_dp->force_audio) 3299 return 0; 3300 3301 intel_dp->force_audio = i; 3302 3303 if (i == HDMI_AUDIO_AUTO) 3304 has_audio = intel_dp_detect_audio(connector); 3305 else 3306 has_audio = (i == HDMI_AUDIO_ON); 3307 3308 if (has_audio == intel_dp->has_audio) 3309 return 0; 3310 3311 intel_dp->has_audio = has_audio; 3312 goto done; 3313 } 3314 3315 if (property == dev_priv->broadcast_rgb_property) { 3316 bool old_auto = intel_dp->color_range_auto; 3317 uint32_t old_range = intel_dp->color_range; 3318 3319 switch (val) { 3320 case INTEL_BROADCAST_RGB_AUTO: 3321 intel_dp->color_range_auto = true; 3322 break; 3323 case INTEL_BROADCAST_RGB_FULL: 3324 intel_dp->color_range_auto = false; 3325 intel_dp->color_range = 0; 3326 break; 3327 case INTEL_BROADCAST_RGB_LIMITED: 3328 intel_dp->color_range_auto = false; 3329 intel_dp->color_range = DP_COLOR_RANGE_16_235; 3330 break; 3331 default: 3332 return -EINVAL; 3333 } 3334 3335 if (old_auto == intel_dp->color_range_auto && 3336 old_range == intel_dp->color_range) 3337 return 0; 3338 3339 goto done; 3340 } 3341 3342 if (is_edp(intel_dp) && 3343 property == connector->dev->mode_config.scaling_mode_property) { 3344 if (val == DRM_MODE_SCALE_NONE) { 3345 DRM_DEBUG_KMS("no scaling not supported\n"); 3346 return -EINVAL; 3347 } 3348 3349 if (intel_connector->panel.fitting_mode == val) { 3350 /* the eDP scaling property is not changed */ 3351 return 0; 3352 } 3353 intel_connector->panel.fitting_mode = val; 3354 3355 goto done; 3356 } 3357 3358 return -EINVAL; 3359 3360done: 3361 if (intel_encoder->base.crtc) 3362 intel_crtc_restore_mode(intel_encoder->base.crtc); 3363 3364 return 0; 3365} 
3366 3367static void 3368intel_dp_connector_destroy(struct drm_connector *connector) 3369{ 3370 struct intel_connector *intel_connector = to_intel_connector(connector); 3371 3372 if (!IS_ERR_OR_NULL(intel_connector->edid)) 3373 kfree(intel_connector->edid); 3374 3375 /* Can't call is_edp() since the encoder may have been destroyed 3376 * already. */ 3377 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3378 intel_panel_fini(&intel_connector->panel); 3379 3380 drm_connector_cleanup(connector); 3381 kfree(connector); 3382} 3383 3384void intel_dp_encoder_destroy(struct drm_encoder *encoder) 3385{ 3386 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 3387 struct intel_dp *intel_dp = &intel_dig_port->dp; 3388 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3389 3390 i2c_del_adapter(&intel_dp->adapter); 3391 drm_encoder_cleanup(encoder); 3392 if (is_edp(intel_dp)) { 3393 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3394 mutex_lock(&dev->mode_config.mutex); 3395 edp_panel_vdd_off_sync(intel_dp); 3396 mutex_unlock(&dev->mode_config.mutex); 3397 } 3398 kfree(intel_dig_port); 3399} 3400 3401static const struct drm_connector_funcs intel_dp_connector_funcs = { 3402 .dpms = intel_connector_dpms, 3403 .detect = intel_dp_detect, 3404 .fill_modes = drm_helper_probe_single_connector_modes, 3405 .set_property = intel_dp_set_property, 3406 .destroy = intel_dp_connector_destroy, 3407}; 3408 3409static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 3410 .get_modes = intel_dp_get_modes, 3411 .mode_valid = intel_dp_mode_valid, 3412 .best_encoder = intel_best_encoder, 3413}; 3414 3415static const struct drm_encoder_funcs intel_dp_enc_funcs = { 3416 .destroy = intel_dp_encoder_destroy, 3417}; 3418 3419static void 3420intel_dp_hot_plug(struct intel_encoder *intel_encoder) 3421{ 3422 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 3423 3424 intel_dp_check_link_status(intel_dp); 3425} 3426 3427/* Return 
which DP Port should be selected for Transcoder DP control */ 3428int 3429intel_trans_dp_port_sel(struct drm_crtc *crtc) 3430{ 3431 struct drm_device *dev = crtc->dev; 3432 struct intel_encoder *intel_encoder; 3433 struct intel_dp *intel_dp; 3434 3435 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 3436 intel_dp = enc_to_intel_dp(&intel_encoder->base); 3437 3438 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 3439 intel_encoder->type == INTEL_OUTPUT_EDP) 3440 return intel_dp->output_reg; 3441 } 3442 3443 return -1; 3444} 3445 3446/* check the VBT to see whether the eDP is on DP-D port */ 3447bool intel_dp_is_edp(struct drm_device *dev, enum port port) 3448{ 3449 struct drm_i915_private *dev_priv = dev->dev_private; 3450 union child_device_config *p_child; 3451 int i; 3452 static const short port_mapping[] = { 3453 [PORT_B] = PORT_IDPB, 3454 [PORT_C] = PORT_IDPC, 3455 [PORT_D] = PORT_IDPD, 3456 }; 3457 3458 if (port == PORT_A) 3459 return true; 3460 3461 if (!dev_priv->vbt.child_dev_num) 3462 return false; 3463 3464 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3465 p_child = dev_priv->vbt.child_dev + i; 3466 3467 if (p_child->common.dvo_port == port_mapping[port] && 3468 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == 3469 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) 3470 return true; 3471 } 3472 return false; 3473} 3474 3475static void 3476intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 3477{ 3478 struct intel_connector *intel_connector = to_intel_connector(connector); 3479 3480 intel_attach_force_audio_property(connector); 3481 intel_attach_broadcast_rgb_property(connector); 3482 intel_dp->color_range_auto = true; 3483 3484 if (is_edp(intel_dp)) { 3485 drm_mode_create_scaling_mode_property(connector->dev); 3486 drm_object_attach_property( 3487 &connector->base, 3488 connector->dev->mode_config.scaling_mode_property, 3489 DRM_MODE_SCALE_ASPECT); 3490 intel_connector->panel.fitting_mode = 
DRM_MODE_SCALE_ASPECT; 3491 } 3492} 3493 3494static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 3495{ 3496 intel_dp->last_power_cycle = jiffies; 3497 intel_dp->last_power_on = jiffies; 3498 intel_dp->last_backlight_off = jiffies; 3499} 3500 3501static void 3502intel_dp_init_panel_power_sequencer(struct drm_device *dev, 3503 struct intel_dp *intel_dp, 3504 struct edp_power_seq *out) 3505{ 3506 struct drm_i915_private *dev_priv = dev->dev_private; 3507 struct edp_power_seq cur, vbt, spec, final; 3508 u32 pp_on, pp_off, pp_div, pp; 3509 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; 3510 3511 if (HAS_PCH_SPLIT(dev)) { 3512 pp_ctrl_reg = PCH_PP_CONTROL; 3513 pp_on_reg = PCH_PP_ON_DELAYS; 3514 pp_off_reg = PCH_PP_OFF_DELAYS; 3515 pp_div_reg = PCH_PP_DIVISOR; 3516 } else { 3517 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 3518 3519 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 3520 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); 3521 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); 3522 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 3523 } 3524 3525 /* Workaround: Need to write PP_CONTROL with the unlock key as 3526 * the very first thing. 
*/ 3527 pp = ironlake_get_pp_control(intel_dp); 3528 I915_WRITE(pp_ctrl_reg, pp); 3529 3530 pp_on = I915_READ(pp_on_reg); 3531 pp_off = I915_READ(pp_off_reg); 3532 pp_div = I915_READ(pp_div_reg); 3533 3534 /* Pull timing values out of registers */ 3535 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 3536 PANEL_POWER_UP_DELAY_SHIFT; 3537 3538 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 3539 PANEL_LIGHT_ON_DELAY_SHIFT; 3540 3541 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 3542 PANEL_LIGHT_OFF_DELAY_SHIFT; 3543 3544 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 3545 PANEL_POWER_DOWN_DELAY_SHIFT; 3546 3547 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 3548 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 3549 3550 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 3551 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 3552 3553 vbt = dev_priv->vbt.edp_pps; 3554 3555 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 3556 * our hw here, which are all in 100usec. */ 3557 spec.t1_t3 = 210 * 10; 3558 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 3559 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 3560 spec.t10 = 500 * 10; 3561 /* This one is special and actually in units of 100ms, but zero 3562 * based in the hw (so we need to add 100 ms). But the sw vbt 3563 * table multiplies it with 1000 to make it in units of 100usec, 3564 * too. */ 3565 spec.t11_t12 = (510 + 100) * 10; 3566 3567 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 3568 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); 3569 3570 /* Use the max of the register settings and vbt. If both are 3571 * unset, fall back to the spec limits. */ 3572#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? 
\ 3573 spec.field : \ 3574 max(cur.field, vbt.field)) 3575 assign_final(t1_t3); 3576 assign_final(t8); 3577 assign_final(t9); 3578 assign_final(t10); 3579 assign_final(t11_t12); 3580#undef assign_final 3581 3582#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) 3583 intel_dp->panel_power_up_delay = get_delay(t1_t3); 3584 intel_dp->backlight_on_delay = get_delay(t8); 3585 intel_dp->backlight_off_delay = get_delay(t9); 3586 intel_dp->panel_power_down_delay = get_delay(t10); 3587 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 3588#undef get_delay 3589 3590 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 3591 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 3592 intel_dp->panel_power_cycle_delay); 3593 3594 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 3595 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 3596 3597 if (out) 3598 *out = final; 3599} 3600 3601static void 3602intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 3603 struct intel_dp *intel_dp, 3604 struct edp_power_seq *seq) 3605{ 3606 struct drm_i915_private *dev_priv = dev->dev_private; 3607 u32 pp_on, pp_off, pp_div, port_sel = 0; 3608 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); 3609 int pp_on_reg, pp_off_reg, pp_div_reg; 3610 3611 if (HAS_PCH_SPLIT(dev)) { 3612 pp_on_reg = PCH_PP_ON_DELAYS; 3613 pp_off_reg = PCH_PP_OFF_DELAYS; 3614 pp_div_reg = PCH_PP_DIVISOR; 3615 } else { 3616 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 3617 3618 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); 3619 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); 3620 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 3621 } 3622 3623 /* 3624 * And finally store the new values in the power sequencer. The 3625 * backlight delays are set to 1 because we do manual waits on them. For 3626 * T8, even BSpec recommends doing it. 
For T9, if we don't do this, 3627 * we'll end up waiting for the backlight off delay twice: once when we 3628 * do the manual sleep, and once when we disable the panel and wait for 3629 * the PP_STATUS bit to become zero. 3630 */ 3631 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 3632 (1 << PANEL_LIGHT_ON_DELAY_SHIFT); 3633 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 3634 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 3635 /* Compute the divisor for the pp clock, simply match the Bspec 3636 * formula. */ 3637 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; 3638 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) 3639 << PANEL_POWER_CYCLE_DELAY_SHIFT); 3640 3641 /* Haswell doesn't have any port selection bits for the panel 3642 * power sequencer any more. */ 3643 if (IS_VALLEYVIEW(dev)) { 3644 if (dp_to_dig_port(intel_dp)->port == PORT_B) 3645 port_sel = PANEL_PORT_SELECT_DPB_VLV; 3646 else 3647 port_sel = PANEL_PORT_SELECT_DPC_VLV; 3648 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 3649 if (dp_to_dig_port(intel_dp)->port == PORT_A) 3650 port_sel = PANEL_PORT_SELECT_DPA; 3651 else 3652 port_sel = PANEL_PORT_SELECT_DPD; 3653 } 3654 3655 pp_on |= port_sel; 3656 3657 I915_WRITE(pp_on_reg, pp_on); 3658 I915_WRITE(pp_off_reg, pp_off); 3659 I915_WRITE(pp_div_reg, pp_div); 3660 3661 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 3662 I915_READ(pp_on_reg), 3663 I915_READ(pp_off_reg), 3664 I915_READ(pp_div_reg)); 3665} 3666 3667static bool intel_edp_init_connector(struct intel_dp *intel_dp, 3668 struct intel_connector *intel_connector, 3669 struct edp_power_seq *power_seq) 3670{ 3671 struct drm_connector *connector = &intel_connector->base; 3672 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3673 struct drm_device *dev = intel_dig_port->base.base.dev; 3674 struct drm_i915_private *dev_priv = dev->dev_private; 3675 struct drm_display_mode *fixed_mode = NULL; 3676 bool has_dpcd; 3677 struct 
drm_display_mode *scan; 3678 struct edid *edid; 3679 3680 if (!is_edp(intel_dp)) 3681 return true; 3682 3683 /* Cache DPCD and EDID for edp. */ 3684 edp_panel_vdd_on(intel_dp); 3685 has_dpcd = intel_dp_get_dpcd(intel_dp); 3686 edp_panel_vdd_off(intel_dp, false); 3687 3688 if (has_dpcd) { 3689 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3690 dev_priv->no_aux_handshake = 3691 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 3692 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 3693 } else { 3694 /* if this fails, presume the device is a ghost */ 3695 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 3696 return false; 3697 } 3698 3699 /* We now know it's not a ghost, init power sequence regs. */ 3700 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); 3701 3702 edid = drm_get_edid(connector, &intel_dp->adapter); 3703 if (edid) { 3704 if (drm_add_edid_modes(connector, edid)) { 3705 drm_mode_connector_update_edid_property(connector, 3706 edid); 3707 drm_edid_to_eld(connector, edid); 3708 } else { 3709 kfree(edid); 3710 edid = ERR_PTR(-EINVAL); 3711 } 3712 } else { 3713 edid = ERR_PTR(-ENOENT); 3714 } 3715 intel_connector->edid = edid; 3716 3717 /* prefer fixed mode from EDID if available */ 3718 list_for_each_entry(scan, &connector->probed_modes, head) { 3719 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 3720 fixed_mode = drm_mode_duplicate(dev, scan); 3721 break; 3722 } 3723 } 3724 3725 /* fallback to VBT if available for eDP */ 3726 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 3727 fixed_mode = drm_mode_duplicate(dev, 3728 dev_priv->vbt.lfp_lvds_vbt_mode); 3729 if (fixed_mode) 3730 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3731 } 3732 3733 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 3734 intel_panel_setup_backlight(connector); 3735 3736 return true; 3737} 3738 3739bool 3740intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 3741 struct intel_connector *intel_connector) 3742{ 3743 struct drm_connector *connector = 
&intel_connector->base; 3744 struct intel_dp *intel_dp = &intel_dig_port->dp; 3745 struct intel_encoder *intel_encoder = &intel_dig_port->base; 3746 struct drm_device *dev = intel_encoder->base.dev; 3747 struct drm_i915_private *dev_priv = dev->dev_private; 3748 enum port port = intel_dig_port->port; 3749 struct edp_power_seq power_seq = { 0 }; 3750 const char *name = NULL; 3751 int type, error; 3752 3753 /* intel_dp vfuncs */ 3754 if (IS_VALLEYVIEW(dev)) 3755 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; 3756 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3757 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 3758 else if (HAS_PCH_SPLIT(dev)) 3759 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; 3760 else 3761 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; 3762 3763 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; 3764 3765 /* Preserve the current hw state. */ 3766 intel_dp->DP = I915_READ(intel_dp->output_reg); 3767 intel_dp->attached_connector = intel_connector; 3768 3769 if (intel_dp_is_edp(dev, port)) 3770 type = DRM_MODE_CONNECTOR_eDP; 3771 else 3772 type = DRM_MODE_CONNECTOR_DisplayPort; 3773 3774 /* 3775 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 3776 * for DP the encoder type can be set by the caller to 3777 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 3778 */ 3779 if (type == DRM_MODE_CONNECTOR_eDP) 3780 intel_encoder->type = INTEL_OUTPUT_EDP; 3781 3782 DRM_DEBUG_KMS("Adding %s connector on port %c\n", 3783 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 3784 port_name(port)); 3785 3786 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 3787 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 3788 3789 connector->interlace_allowed = true; 3790 connector->doublescan_allowed = 0; 3791 3792 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 3793 edp_panel_vdd_work); 3794 3795 intel_connector_attach_encoder(intel_connector, intel_encoder); 3796 drm_sysfs_connector_add(connector); 3797 3798 if (HAS_DDI(dev)) 3799 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 3800 else 3801 intel_connector->get_hw_state = intel_connector_get_hw_state; 3802 3803 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 3804 if (HAS_DDI(dev)) { 3805 switch (intel_dig_port->port) { 3806 case PORT_A: 3807 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; 3808 break; 3809 case PORT_B: 3810 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; 3811 break; 3812 case PORT_C: 3813 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; 3814 break; 3815 case PORT_D: 3816 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; 3817 break; 3818 default: 3819 BUG(); 3820 } 3821 } 3822 3823 /* Set up the DDC bus. 
*/ 3824 switch (port) { 3825 case PORT_A: 3826 intel_encoder->hpd_pin = HPD_PORT_A; 3827 name = "DPDDC-A"; 3828 break; 3829 case PORT_B: 3830 intel_encoder->hpd_pin = HPD_PORT_B; 3831 name = "DPDDC-B"; 3832 break; 3833 case PORT_C: 3834 intel_encoder->hpd_pin = HPD_PORT_C; 3835 name = "DPDDC-C"; 3836 break; 3837 case PORT_D: 3838 intel_encoder->hpd_pin = HPD_PORT_D; 3839 name = "DPDDC-D"; 3840 break; 3841 default: 3842 BUG(); 3843 } 3844 3845 if (is_edp(intel_dp)) { 3846 intel_dp_init_panel_power_timestamps(intel_dp); 3847 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 3848 } 3849 3850 error = intel_dp_i2c_init(intel_dp, intel_connector, name); 3851 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", 3852 error, port_name(port)); 3853 3854 intel_dp->psr_setup_done = false; 3855 3856 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { 3857 i2c_del_adapter(&intel_dp->adapter); 3858 if (is_edp(intel_dp)) { 3859 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3860 mutex_lock(&dev->mode_config.mutex); 3861 edp_panel_vdd_off_sync(intel_dp); 3862 mutex_unlock(&dev->mode_config.mutex); 3863 } 3864 drm_sysfs_connector_remove(connector); 3865 drm_connector_cleanup(connector); 3866 return false; 3867 } 3868 3869 intel_dp_add_properties(intel_dp, connector); 3870 3871 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 3872 * 0xd. Failure to do so will result in spurious interrupts being 3873 * generated on the port when a cable is not attached. 
3874 */ 3875 if (IS_G4X(dev) && !IS_GM45(dev)) { 3876 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 3877 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 3878 } 3879 3880 return true; 3881} 3882 3883void 3884intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 3885{ 3886 struct intel_digital_port *intel_dig_port; 3887 struct intel_encoder *intel_encoder; 3888 struct drm_encoder *encoder; 3889 struct intel_connector *intel_connector; 3890 3891 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 3892 if (!intel_dig_port) 3893 return; 3894 3895 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); 3896 if (!intel_connector) { 3897 kfree(intel_dig_port); 3898 return; 3899 } 3900 3901 intel_encoder = &intel_dig_port->base; 3902 encoder = &intel_encoder->base; 3903 3904 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 3905 DRM_MODE_ENCODER_TMDS); 3906 3907 intel_encoder->compute_config = intel_dp_compute_config; 3908 intel_encoder->mode_set = intel_dp_mode_set; 3909 intel_encoder->disable = intel_disable_dp; 3910 intel_encoder->post_disable = intel_post_disable_dp; 3911 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3912 intel_encoder->get_config = intel_dp_get_config; 3913 if (IS_VALLEYVIEW(dev)) { 3914 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 3915 intel_encoder->pre_enable = vlv_pre_enable_dp; 3916 intel_encoder->enable = vlv_enable_dp; 3917 } else { 3918 intel_encoder->pre_enable = g4x_pre_enable_dp; 3919 intel_encoder->enable = g4x_enable_dp; 3920 } 3921 3922 intel_dig_port->port = port; 3923 intel_dig_port->dp.output_reg = output_reg; 3924 3925 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 3926 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 3927 intel_encoder->cloneable = false; 3928 intel_encoder->hot_plug = intel_dp_hot_plug; 3929 3930 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { 3931 drm_encoder_cleanup(encoder); 3932 kfree(intel_dig_port); 3933 
kfree(intel_connector); 3934 } 3935} 3936