/* intel_dp.c — revision ca73b4f026751254da5c98ac8c3667b16fb00245 */
1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/i2c.h> 29#include <linux/slab.h> 30#include <linux/export.h> 31#include <drm/drmP.h> 32#include <drm/drm_crtc.h> 33#include <drm/drm_crtc_helper.h> 34#include <drm/drm_edid.h> 35#include "intel_drv.h" 36#include <drm/i915_drm.h> 37#include "i915_drv.h" 38 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40 41struct dp_link_dpll { 42 int link_bw; 43 struct dpll dpll; 44}; 45 46static const struct dp_link_dpll gen4_dpll[] = { 47 { DP_LINK_BW_1_62, 48 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, 49 { DP_LINK_BW_2_7, 50 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } 51}; 52 53static const struct dp_link_dpll pch_dpll[] = { 54 { DP_LINK_BW_1_62, 55 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, 56 { DP_LINK_BW_2_7, 57 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } 58}; 59 60static const struct dp_link_dpll vlv_dpll[] = { 61 { DP_LINK_BW_1_62, 62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 5, .m2 = 3 } }, 63 { DP_LINK_BW_2_7, 64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 65}; 66 67/** 68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 69 * @intel_dp: DP struct 70 * 71 * If a CPU or PCH DP output is attached to an eDP panel, this function 72 * will return true, and false otherwise. 
73 */ 74static bool is_edp(struct intel_dp *intel_dp) 75{ 76 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 77 78 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 79} 80 81static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) 82{ 83 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 84 85 return intel_dig_port->base.base.dev; 86} 87 88static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 89{ 90 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 91} 92 93static void intel_dp_link_down(struct intel_dp *intel_dp); 94 95static int 96intel_dp_max_link_bw(struct intel_dp *intel_dp) 97{ 98 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 99 100 switch (max_link_bw) { 101 case DP_LINK_BW_1_62: 102 case DP_LINK_BW_2_7: 103 break; 104 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 105 max_link_bw = DP_LINK_BW_2_7; 106 break; 107 default: 108 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", 109 max_link_bw); 110 max_link_bw = DP_LINK_BW_1_62; 111 break; 112 } 113 return max_link_bw; 114} 115 116/* 117 * The units on the numbers in the next two are... bizarre. Examples will 118 * make it clearer; this one parallels an example in the eDP spec. 119 * 120 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 121 * 122 * 270000 * 1 * 8 / 10 == 216000 123 * 124 * The actual data capacity of that configuration is 2.16Gbit/s, so the 125 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 126 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 127 * 119000. At 18bpp that's 2142000 kilobits per second. 128 * 129 * Thus the strange-looking division by 10 in intel_dp_link_required, to 130 * get the result in decakilobits instead of kilobits. 
131 */ 132 133static int 134intel_dp_link_required(int pixel_clock, int bpp) 135{ 136 return (pixel_clock * bpp + 9) / 10; 137} 138 139static int 140intel_dp_max_data_rate(int max_link_clock, int max_lanes) 141{ 142 return (max_link_clock * max_lanes * 8) / 10; 143} 144 145static int 146intel_dp_mode_valid(struct drm_connector *connector, 147 struct drm_display_mode *mode) 148{ 149 struct intel_dp *intel_dp = intel_attached_dp(connector); 150 struct intel_connector *intel_connector = to_intel_connector(connector); 151 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 152 int target_clock = mode->clock; 153 int max_rate, mode_rate, max_lanes, max_link_clock; 154 155 if (is_edp(intel_dp) && fixed_mode) { 156 if (mode->hdisplay > fixed_mode->hdisplay) 157 return MODE_PANEL; 158 159 if (mode->vdisplay > fixed_mode->vdisplay) 160 return MODE_PANEL; 161 162 target_clock = fixed_mode->clock; 163 } 164 165 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 166 max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 167 168 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 169 mode_rate = intel_dp_link_required(target_clock, 18); 170 171 if (mode_rate > max_rate) 172 return MODE_CLOCK_HIGH; 173 174 if (mode->clock < 10000) 175 return MODE_CLOCK_LOW; 176 177 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 178 return MODE_H_ILLEGAL; 179 180 return MODE_OK; 181} 182 183static uint32_t 184pack_aux(uint8_t *src, int src_bytes) 185{ 186 int i; 187 uint32_t v = 0; 188 189 if (src_bytes > 4) 190 src_bytes = 4; 191 for (i = 0; i < src_bytes; i++) 192 v |= ((uint32_t) src[i]) << ((3-i) * 8); 193 return v; 194} 195 196static void 197unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 198{ 199 int i; 200 if (dst_bytes > 4) 201 dst_bytes = 4; 202 for (i = 0; i < dst_bytes; i++) 203 dst[i] = src >> ((3-i) * 8); 204} 205 206/* hrawclock is 1/4 the FSB frequency */ 207static int 208intel_hrawclk(struct drm_device *dev) 209{ 210 struct 
drm_i915_private *dev_priv = dev->dev_private; 211 uint32_t clkcfg; 212 213 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 214 if (IS_VALLEYVIEW(dev)) 215 return 200; 216 217 clkcfg = I915_READ(CLKCFG); 218 switch (clkcfg & CLKCFG_FSB_MASK) { 219 case CLKCFG_FSB_400: 220 return 100; 221 case CLKCFG_FSB_533: 222 return 133; 223 case CLKCFG_FSB_667: 224 return 166; 225 case CLKCFG_FSB_800: 226 return 200; 227 case CLKCFG_FSB_1067: 228 return 266; 229 case CLKCFG_FSB_1333: 230 return 333; 231 /* these two are just a guess; one of them might be right */ 232 case CLKCFG_FSB_1600: 233 case CLKCFG_FSB_1600_ALT: 234 return 400; 235 default: 236 return 133; 237 } 238} 239 240static void 241intel_dp_init_panel_power_sequencer(struct drm_device *dev, 242 struct intel_dp *intel_dp, 243 struct edp_power_seq *out); 244static void 245intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 246 struct intel_dp *intel_dp, 247 struct edp_power_seq *out); 248 249static enum pipe 250vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 251{ 252 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 253 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 254 struct drm_device *dev = intel_dig_port->base.base.dev; 255 struct drm_i915_private *dev_priv = dev->dev_private; 256 enum port port = intel_dig_port->port; 257 enum pipe pipe; 258 259 /* modeset should have pipe */ 260 if (crtc) 261 return to_intel_crtc(crtc)->pipe; 262 263 /* init time, try to find a pipe with this port selected */ 264 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 265 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & 266 PANEL_PORT_SELECT_MASK; 267 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B) 268 return pipe; 269 if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C) 270 return pipe; 271 } 272 273 /* shrug */ 274 return PIPE_A; 275} 276 277static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) 278{ 279 struct drm_device *dev = 
intel_dp_to_dev(intel_dp); 280 281 if (HAS_PCH_SPLIT(dev)) 282 return PCH_PP_CONTROL; 283 else 284 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); 285} 286 287static u32 _pp_stat_reg(struct intel_dp *intel_dp) 288{ 289 struct drm_device *dev = intel_dp_to_dev(intel_dp); 290 291 if (HAS_PCH_SPLIT(dev)) 292 return PCH_PP_STATUS; 293 else 294 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 295} 296 297static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 298{ 299 struct drm_device *dev = intel_dp_to_dev(intel_dp); 300 struct drm_i915_private *dev_priv = dev->dev_private; 301 302 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 303} 304 305static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 306{ 307 struct drm_device *dev = intel_dp_to_dev(intel_dp); 308 struct drm_i915_private *dev_priv = dev->dev_private; 309 310 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; 311} 312 313static void 314intel_dp_check_edp(struct intel_dp *intel_dp) 315{ 316 struct drm_device *dev = intel_dp_to_dev(intel_dp); 317 struct drm_i915_private *dev_priv = dev->dev_private; 318 319 if (!is_edp(intel_dp)) 320 return; 321 322 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 323 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 324 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 325 I915_READ(_pp_stat_reg(intel_dp)), 326 I915_READ(_pp_ctrl_reg(intel_dp))); 327 } 328} 329 330static uint32_t 331intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) 332{ 333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 334 struct drm_device *dev = intel_dig_port->base.base.dev; 335 struct drm_i915_private *dev_priv = dev->dev_private; 336 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 337 uint32_t status; 338 bool done; 339 340#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 341 if (has_aux_irq) 342 
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 343 msecs_to_jiffies_timeout(10)); 344 else 345 done = wait_for_atomic(C, 10) == 0; 346 if (!done) 347 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 348 has_aux_irq); 349#undef C 350 351 return status; 352} 353 354static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, 355 int index) 356{ 357 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 358 struct drm_device *dev = intel_dig_port->base.base.dev; 359 struct drm_i915_private *dev_priv = dev->dev_private; 360 361 /* The clock divider is based off the hrawclk, 362 * and would like to run at 2MHz. So, take the 363 * hrawclk value and divide by 2 and use that 364 * 365 * Note that PCH attached eDP panels should use a 125MHz input 366 * clock divider. 367 */ 368 if (IS_VALLEYVIEW(dev)) { 369 return index ? 0 : 100; 370 } else if (intel_dig_port->port == PORT_A) { 371 if (index) 372 return 0; 373 if (HAS_DDI(dev)) 374 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); 375 else if (IS_GEN6(dev) || IS_GEN7(dev)) 376 return 200; /* SNB & IVB eDP input clock at 400Mhz */ 377 else 378 return 225; /* eDP input clock at 450Mhz */ 379 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 380 /* Workaround for non-ULT HSW */ 381 switch (index) { 382 case 0: return 63; 383 case 1: return 72; 384 default: return 0; 385 } 386 } else if (HAS_PCH_SPLIT(dev)) { 387 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 388 } else { 389 return index ? 
0 :intel_hrawclk(dev) / 2; 390 } 391} 392 393static int 394intel_dp_aux_ch(struct intel_dp *intel_dp, 395 uint8_t *send, int send_bytes, 396 uint8_t *recv, int recv_size) 397{ 398 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 399 struct drm_device *dev = intel_dig_port->base.base.dev; 400 struct drm_i915_private *dev_priv = dev->dev_private; 401 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 402 uint32_t ch_data = ch_ctl + 4; 403 uint32_t aux_clock_divider; 404 int i, ret, recv_bytes; 405 uint32_t status; 406 int try, precharge, clock = 0; 407 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); 408 409 /* dp aux is extremely sensitive to irq latency, hence request the 410 * lowest possible wakeup latency and so prevent the cpu from going into 411 * deep sleep states. 412 */ 413 pm_qos_update_request(&dev_priv->pm_qos, 0); 414 415 intel_dp_check_edp(intel_dp); 416 417 if (IS_GEN6(dev)) 418 precharge = 3; 419 else 420 precharge = 5; 421 422 intel_aux_display_runtime_get(dev_priv); 423 424 /* Try to wait for any previous AUX channel activity */ 425 for (try = 0; try < 3; try++) { 426 status = I915_READ_NOTRACE(ch_ctl); 427 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 428 break; 429 msleep(1); 430 } 431 432 if (try == 3) { 433 WARN(1, "dp_aux_ch not started status 0x%08x\n", 434 I915_READ(ch_ctl)); 435 ret = -EBUSY; 436 goto out; 437 } 438 439 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 440 /* Must try at least 3 times according to DP spec */ 441 for (try = 0; try < 5; try++) { 442 /* Load the send data into the aux channel data registers */ 443 for (i = 0; i < send_bytes; i += 4) 444 I915_WRITE(ch_data + i, 445 pack_aux(send + i, send_bytes - i)); 446 447 /* Send the command and wait for it to complete */ 448 I915_WRITE(ch_ctl, 449 DP_AUX_CH_CTL_SEND_BUSY | 450 (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | 451 DP_AUX_CH_CTL_TIME_OUT_400us | 452 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 453 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 454 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 455 DP_AUX_CH_CTL_DONE | 456 DP_AUX_CH_CTL_TIME_OUT_ERROR | 457 DP_AUX_CH_CTL_RECEIVE_ERROR); 458 459 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 460 461 /* Clear done status and any errors */ 462 I915_WRITE(ch_ctl, 463 status | 464 DP_AUX_CH_CTL_DONE | 465 DP_AUX_CH_CTL_TIME_OUT_ERROR | 466 DP_AUX_CH_CTL_RECEIVE_ERROR); 467 468 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 469 DP_AUX_CH_CTL_RECEIVE_ERROR)) 470 continue; 471 if (status & DP_AUX_CH_CTL_DONE) 472 break; 473 } 474 if (status & DP_AUX_CH_CTL_DONE) 475 break; 476 } 477 478 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 479 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 480 ret = -EBUSY; 481 goto out; 482 } 483 484 /* Check for timeout or receive error. 485 * Timeouts occur when the sink is not connected 486 */ 487 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 488 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 489 ret = -EIO; 490 goto out; 491 } 492 493 /* Timeouts occur when the device isn't connected, so they're 494 * "normal" -- don't fill the kernel log with these */ 495 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 496 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 497 ret = -ETIMEDOUT; 498 goto out; 499 } 500 501 /* Unload any bytes sent back from the other side */ 502 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 503 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 504 if (recv_bytes > recv_size) 505 recv_bytes = recv_size; 506 507 for (i = 0; i < recv_bytes; i += 4) 508 unpack_aux(I915_READ(ch_data + i), 509 recv + i, recv_bytes - i); 510 511 ret = recv_bytes; 512out: 513 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 514 intel_aux_display_runtime_put(dev_priv); 515 516 return ret; 517} 518 519/* Write 
data to the aux channel in native mode */ 520static int 521intel_dp_aux_native_write(struct intel_dp *intel_dp, 522 uint16_t address, uint8_t *send, int send_bytes) 523{ 524 int ret; 525 uint8_t msg[20]; 526 int msg_bytes; 527 uint8_t ack; 528 529 intel_dp_check_edp(intel_dp); 530 if (send_bytes > 16) 531 return -1; 532 msg[0] = AUX_NATIVE_WRITE << 4; 533 msg[1] = address >> 8; 534 msg[2] = address & 0xff; 535 msg[3] = send_bytes - 1; 536 memcpy(&msg[4], send, send_bytes); 537 msg_bytes = send_bytes + 4; 538 for (;;) { 539 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 540 if (ret < 0) 541 return ret; 542 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 543 break; 544 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 545 udelay(100); 546 else 547 return -EIO; 548 } 549 return send_bytes; 550} 551 552/* Write a single byte to the aux channel in native mode */ 553static int 554intel_dp_aux_native_write_1(struct intel_dp *intel_dp, 555 uint16_t address, uint8_t byte) 556{ 557 return intel_dp_aux_native_write(intel_dp, address, &byte, 1); 558} 559 560/* read bytes from a native aux channel */ 561static int 562intel_dp_aux_native_read(struct intel_dp *intel_dp, 563 uint16_t address, uint8_t *recv, int recv_bytes) 564{ 565 uint8_t msg[4]; 566 int msg_bytes; 567 uint8_t reply[20]; 568 int reply_bytes; 569 uint8_t ack; 570 int ret; 571 572 intel_dp_check_edp(intel_dp); 573 msg[0] = AUX_NATIVE_READ << 4; 574 msg[1] = address >> 8; 575 msg[2] = address & 0xff; 576 msg[3] = recv_bytes - 1; 577 578 msg_bytes = 4; 579 reply_bytes = recv_bytes + 1; 580 581 for (;;) { 582 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 583 reply, reply_bytes); 584 if (ret == 0) 585 return -EPROTO; 586 if (ret < 0) 587 return ret; 588 ack = reply[0]; 589 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { 590 memcpy(recv, reply + 1, ret - 1); 591 return ret - 1; 592 } 593 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 594 
udelay(100); 595 else 596 return -EIO; 597 } 598} 599 600static int 601intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 602 uint8_t write_byte, uint8_t *read_byte) 603{ 604 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 605 struct intel_dp *intel_dp = container_of(adapter, 606 struct intel_dp, 607 adapter); 608 uint16_t address = algo_data->address; 609 uint8_t msg[5]; 610 uint8_t reply[2]; 611 unsigned retry; 612 int msg_bytes; 613 int reply_bytes; 614 int ret; 615 616 intel_dp_check_edp(intel_dp); 617 /* Set up the command byte */ 618 if (mode & MODE_I2C_READ) 619 msg[0] = AUX_I2C_READ << 4; 620 else 621 msg[0] = AUX_I2C_WRITE << 4; 622 623 if (!(mode & MODE_I2C_STOP)) 624 msg[0] |= AUX_I2C_MOT << 4; 625 626 msg[1] = address >> 8; 627 msg[2] = address; 628 629 switch (mode) { 630 case MODE_I2C_WRITE: 631 msg[3] = 0; 632 msg[4] = write_byte; 633 msg_bytes = 5; 634 reply_bytes = 1; 635 break; 636 case MODE_I2C_READ: 637 msg[3] = 0; 638 msg_bytes = 4; 639 reply_bytes = 2; 640 break; 641 default: 642 msg_bytes = 3; 643 reply_bytes = 1; 644 break; 645 } 646 647 for (retry = 0; retry < 5; retry++) { 648 ret = intel_dp_aux_ch(intel_dp, 649 msg, msg_bytes, 650 reply, reply_bytes); 651 if (ret < 0) { 652 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 653 return ret; 654 } 655 656 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 657 case AUX_NATIVE_REPLY_ACK: 658 /* I2C-over-AUX Reply field is only valid 659 * when paired with AUX ACK. 
660 */ 661 break; 662 case AUX_NATIVE_REPLY_NACK: 663 DRM_DEBUG_KMS("aux_ch native nack\n"); 664 return -EREMOTEIO; 665 case AUX_NATIVE_REPLY_DEFER: 666 udelay(100); 667 continue; 668 default: 669 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 670 reply[0]); 671 return -EREMOTEIO; 672 } 673 674 switch (reply[0] & AUX_I2C_REPLY_MASK) { 675 case AUX_I2C_REPLY_ACK: 676 if (mode == MODE_I2C_READ) { 677 *read_byte = reply[1]; 678 } 679 return reply_bytes - 1; 680 case AUX_I2C_REPLY_NACK: 681 DRM_DEBUG_KMS("aux_i2c nack\n"); 682 return -EREMOTEIO; 683 case AUX_I2C_REPLY_DEFER: 684 DRM_DEBUG_KMS("aux_i2c defer\n"); 685 udelay(100); 686 break; 687 default: 688 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 689 return -EREMOTEIO; 690 } 691 } 692 693 DRM_ERROR("too many retries, giving up\n"); 694 return -EREMOTEIO; 695} 696 697static int 698intel_dp_i2c_init(struct intel_dp *intel_dp, 699 struct intel_connector *intel_connector, const char *name) 700{ 701 int ret; 702 703 DRM_DEBUG_KMS("i2c_init %s\n", name); 704 intel_dp->algo.running = false; 705 intel_dp->algo.address = 0; 706 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; 707 708 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); 709 intel_dp->adapter.owner = THIS_MODULE; 710 intel_dp->adapter.class = I2C_CLASS_DDC; 711 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 712 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 713 intel_dp->adapter.algo_data = &intel_dp->algo; 714 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 715 716 ironlake_edp_panel_vdd_on(intel_dp); 717 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 718 ironlake_edp_panel_vdd_off(intel_dp, false); 719 return ret; 720} 721 722static void 723intel_dp_set_clock(struct intel_encoder *encoder, 724 struct intel_crtc_config *pipe_config, int link_bw) 725{ 726 struct drm_device *dev = encoder->base.dev; 727 const struct dp_link_dpll *divisor = NULL; 728 int i, count = 0; 729 730 if 
(IS_G4X(dev)) { 731 divisor = gen4_dpll; 732 count = ARRAY_SIZE(gen4_dpll); 733 } else if (IS_HASWELL(dev)) { 734 /* Haswell has special-purpose DP DDI clocks. */ 735 } else if (HAS_PCH_SPLIT(dev)) { 736 divisor = pch_dpll; 737 count = ARRAY_SIZE(pch_dpll); 738 } else if (IS_VALLEYVIEW(dev)) { 739 divisor = vlv_dpll; 740 count = ARRAY_SIZE(vlv_dpll); 741 } 742 743 if (divisor && count) { 744 for (i = 0; i < count; i++) { 745 if (link_bw == divisor[i].link_bw) { 746 pipe_config->dpll = divisor[i].dpll; 747 pipe_config->clock_set = true; 748 break; 749 } 750 } 751 } 752} 753 754bool 755intel_dp_compute_config(struct intel_encoder *encoder, 756 struct intel_crtc_config *pipe_config) 757{ 758 struct drm_device *dev = encoder->base.dev; 759 struct drm_i915_private *dev_priv = dev->dev_private; 760 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 761 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 762 enum port port = dp_to_dig_port(intel_dp)->port; 763 struct intel_crtc *intel_crtc = encoder->new_crtc; 764 struct intel_connector *intel_connector = intel_dp->attached_connector; 765 int lane_count, clock; 766 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 767 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 
1 : 0; 768 int bpp, mode_rate; 769 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 770 int link_avail, link_clock; 771 772 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) 773 pipe_config->has_pch_encoder = true; 774 775 pipe_config->has_dp_encoder = true; 776 777 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 778 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 779 adjusted_mode); 780 if (!HAS_PCH_SPLIT(dev)) 781 intel_gmch_panel_fitting(intel_crtc, pipe_config, 782 intel_connector->panel.fitting_mode); 783 else 784 intel_pch_panel_fitting(intel_crtc, pipe_config, 785 intel_connector->panel.fitting_mode); 786 } 787 788 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 789 return false; 790 791 DRM_DEBUG_KMS("DP link computation with max lane count %i " 792 "max bw %02x pixel clock %iKHz\n", 793 max_lane_count, bws[max_clock], adjusted_mode->clock); 794 795 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 796 * bpc in between. 
*/ 797 bpp = pipe_config->pipe_bpp; 798 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { 799 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 800 dev_priv->vbt.edp_bpp); 801 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); 802 } 803 804 for (; bpp >= 6*3; bpp -= 2*3) { 805 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 806 807 for (clock = 0; clock <= max_clock; clock++) { 808 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 809 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 810 link_avail = intel_dp_max_data_rate(link_clock, 811 lane_count); 812 813 if (mode_rate <= link_avail) { 814 goto found; 815 } 816 } 817 } 818 } 819 820 return false; 821 822found: 823 if (intel_dp->color_range_auto) { 824 /* 825 * See: 826 * CEA-861-E - 5.1 Default Encoding Parameters 827 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 828 */ 829 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) 830 intel_dp->color_range = DP_COLOR_RANGE_16_235; 831 else 832 intel_dp->color_range = 0; 833 } 834 835 if (intel_dp->color_range) 836 pipe_config->limited_color_range = true; 837 838 intel_dp->link_bw = bws[clock]; 839 intel_dp->lane_count = lane_count; 840 pipe_config->pipe_bpp = bpp; 841 pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 842 843 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 844 intel_dp->link_bw, intel_dp->lane_count, 845 pipe_config->port_clock, bpp); 846 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 847 mode_rate, link_avail); 848 849 intel_link_compute_m_n(bpp, lane_count, 850 adjusted_mode->clock, pipe_config->port_clock, 851 &pipe_config->dp_m_n); 852 853 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 854 855 return true; 856} 857 858void intel_dp_init_link_config(struct intel_dp *intel_dp) 859{ 860 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 861 intel_dp->link_configuration[0] = intel_dp->link_bw; 862 
intel_dp->link_configuration[1] = intel_dp->lane_count; 863 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 864 /* 865 * Check for DPCD version > 1.1 and enhanced framing support 866 */ 867 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 868 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 869 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 870 } 871} 872 873static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) 874{ 875 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 876 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 877 struct drm_device *dev = crtc->base.dev; 878 struct drm_i915_private *dev_priv = dev->dev_private; 879 u32 dpa_ctl; 880 881 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock); 882 dpa_ctl = I915_READ(DP_A); 883 dpa_ctl &= ~DP_PLL_FREQ_MASK; 884 885 if (crtc->config.port_clock == 162000) { 886 /* For a long time we've carried around a ILK-DevA w/a for the 887 * 160MHz clock. If we're really unlucky, it's still required. 
888 */ 889 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); 890 dpa_ctl |= DP_PLL_FREQ_160MHZ; 891 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 892 } else { 893 dpa_ctl |= DP_PLL_FREQ_270MHZ; 894 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 895 } 896 897 I915_WRITE(DP_A, dpa_ctl); 898 899 POSTING_READ(DP_A); 900 udelay(500); 901} 902 903static void intel_dp_mode_set(struct intel_encoder *encoder) 904{ 905 struct drm_device *dev = encoder->base.dev; 906 struct drm_i915_private *dev_priv = dev->dev_private; 907 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 908 enum port port = dp_to_dig_port(intel_dp)->port; 909 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 910 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; 911 912 /* 913 * There are four kinds of DP registers: 914 * 915 * IBX PCH 916 * SNB CPU 917 * IVB CPU 918 * CPT PCH 919 * 920 * IBX PCH and CPU are the same for almost everything, 921 * except that the CPU DP PLL is configured in this 922 * register 923 * 924 * CPT PCH is quite different, having many bits moved 925 * to the TRANS_DP_CTL register instead. That 926 * configuration happens (oddly) in ironlake_pch_enable 927 */ 928 929 /* Preserve the BIOS-computed detected bit. This is 930 * supposed to be read-only. 
931 */ 932 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 933 934 /* Handle DP bits in common between all three register formats */ 935 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 936 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count); 937 938 if (intel_dp->has_audio) { 939 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 940 pipe_name(crtc->pipe)); 941 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 942 intel_write_eld(&encoder->base, adjusted_mode); 943 } 944 945 intel_dp_init_link_config(intel_dp); 946 947 /* Split out the IBX/CPU vs CPT settings */ 948 949 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 950 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 951 intel_dp->DP |= DP_SYNC_HS_HIGH; 952 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 953 intel_dp->DP |= DP_SYNC_VS_HIGH; 954 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 955 956 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 957 intel_dp->DP |= DP_ENHANCED_FRAMING; 958 959 intel_dp->DP |= crtc->pipe << 29; 960 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) { 961 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 962 intel_dp->DP |= intel_dp->color_range; 963 964 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 965 intel_dp->DP |= DP_SYNC_HS_HIGH; 966 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 967 intel_dp->DP |= DP_SYNC_VS_HIGH; 968 intel_dp->DP |= DP_LINK_TRAIN_OFF; 969 970 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 971 intel_dp->DP |= DP_ENHANCED_FRAMING; 972 973 if (crtc->pipe == 1) 974 intel_dp->DP |= DP_PIPEB_SELECT; 975 } else { 976 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 977 } 978 979 if (port == PORT_A && !IS_VALLEYVIEW(dev)) 980 ironlake_set_pll_cpu_edp(intel_dp); 981} 982 983#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 984#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 985 986#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | 
PP_SEQUENCE_STATE_MASK) 987#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 988 989#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 990#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 991 992static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 993 u32 mask, 994 u32 value) 995{ 996 struct drm_device *dev = intel_dp_to_dev(intel_dp); 997 struct drm_i915_private *dev_priv = dev->dev_private; 998 u32 pp_stat_reg, pp_ctrl_reg; 999 1000 pp_stat_reg = _pp_stat_reg(intel_dp); 1001 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1002 1003 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 1004 mask, value, 1005 I915_READ(pp_stat_reg), 1006 I915_READ(pp_ctrl_reg)); 1007 1008 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) { 1009 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 1010 I915_READ(pp_stat_reg), 1011 I915_READ(pp_ctrl_reg)); 1012 } 1013} 1014 1015static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 1016{ 1017 DRM_DEBUG_KMS("Wait for panel power on\n"); 1018 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 1019} 1020 1021static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 1022{ 1023 DRM_DEBUG_KMS("Wait for panel power off time\n"); 1024 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 1025} 1026 1027static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 1028{ 1029 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 1030 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 1031} 1032 1033 1034/* Read the current pp_control value, unlocking the register if it 1035 * is locked 1036 */ 1037 1038static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) 1039{ 1040 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1041 struct drm_i915_private *dev_priv = dev->dev_private; 1042 u32 control; 1043 1044 control = 
I915_READ(_pp_ctrl_reg(intel_dp)); 1045 control &= ~PANEL_UNLOCK_MASK; 1046 control |= PANEL_UNLOCK_REGS; 1047 return control; 1048} 1049 1050void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 1051{ 1052 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1053 struct drm_i915_private *dev_priv = dev->dev_private; 1054 u32 pp; 1055 u32 pp_stat_reg, pp_ctrl_reg; 1056 1057 if (!is_edp(intel_dp)) 1058 return; 1059 DRM_DEBUG_KMS("Turn eDP VDD on\n"); 1060 1061 WARN(intel_dp->want_panel_vdd, 1062 "eDP VDD already requested on\n"); 1063 1064 intel_dp->want_panel_vdd = true; 1065 1066 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1067 DRM_DEBUG_KMS("eDP VDD already on\n"); 1068 return; 1069 } 1070 1071 if (!ironlake_edp_have_panel_power(intel_dp)) 1072 ironlake_wait_panel_power_cycle(intel_dp); 1073 1074 pp = ironlake_get_pp_control(intel_dp); 1075 pp |= EDP_FORCE_VDD; 1076 1077 pp_stat_reg = _pp_stat_reg(intel_dp); 1078 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1079 1080 I915_WRITE(pp_ctrl_reg, pp); 1081 POSTING_READ(pp_ctrl_reg); 1082 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 1083 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1084 /* 1085 * If the panel wasn't on, delay before accessing aux channel 1086 */ 1087 if (!ironlake_edp_have_panel_power(intel_dp)) { 1088 DRM_DEBUG_KMS("eDP was not running\n"); 1089 msleep(intel_dp->panel_power_up_delay); 1090 } 1091} 1092 1093static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1094{ 1095 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1096 struct drm_i915_private *dev_priv = dev->dev_private; 1097 u32 pp; 1098 u32 pp_stat_reg, pp_ctrl_reg; 1099 1100 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1101 1102 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1103 pp = ironlake_get_pp_control(intel_dp); 1104 pp &= ~EDP_FORCE_VDD; 1105 1106 pp_stat_reg = _pp_ctrl_reg(intel_dp); 1107 pp_ctrl_reg = _pp_stat_reg(intel_dp); 1108 1109 I915_WRITE(pp_ctrl_reg, pp); 1110 
POSTING_READ(pp_ctrl_reg); 1111 1112 /* Make sure sequencer is idle before allowing subsequent activity */ 1113 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 1114 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1115 msleep(intel_dp->panel_power_down_delay); 1116 } 1117} 1118 1119static void ironlake_panel_vdd_work(struct work_struct *__work) 1120{ 1121 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1122 struct intel_dp, panel_vdd_work); 1123 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1124 1125 mutex_lock(&dev->mode_config.mutex); 1126 ironlake_panel_vdd_off_sync(intel_dp); 1127 mutex_unlock(&dev->mode_config.mutex); 1128} 1129 1130void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1131{ 1132 if (!is_edp(intel_dp)) 1133 return; 1134 1135 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1136 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1137 1138 intel_dp->want_panel_vdd = false; 1139 1140 if (sync) { 1141 ironlake_panel_vdd_off_sync(intel_dp); 1142 } else { 1143 /* 1144 * Queue the timer to fire a long 1145 * time from now (relative to the power down delay) 1146 * to keep the panel power up across a sequence of operations 1147 */ 1148 schedule_delayed_work(&intel_dp->panel_vdd_work, 1149 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); 1150 } 1151} 1152 1153void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1154{ 1155 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1156 struct drm_i915_private *dev_priv = dev->dev_private; 1157 u32 pp; 1158 u32 pp_ctrl_reg; 1159 1160 if (!is_edp(intel_dp)) 1161 return; 1162 1163 DRM_DEBUG_KMS("Turn eDP power on\n"); 1164 1165 if (ironlake_edp_have_panel_power(intel_dp)) { 1166 DRM_DEBUG_KMS("eDP power already on\n"); 1167 return; 1168 } 1169 1170 ironlake_wait_panel_power_cycle(intel_dp); 1171 1172 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1173 pp = ironlake_get_pp_control(intel_dp); 1174 if (IS_GEN5(dev)) { 1175 /* ILK workaround: 
disable reset around power sequence */ 1176 pp &= ~PANEL_POWER_RESET; 1177 I915_WRITE(pp_ctrl_reg, pp); 1178 POSTING_READ(pp_ctrl_reg); 1179 } 1180 1181 pp |= POWER_TARGET_ON; 1182 if (!IS_GEN5(dev)) 1183 pp |= PANEL_POWER_RESET; 1184 1185 I915_WRITE(pp_ctrl_reg, pp); 1186 POSTING_READ(pp_ctrl_reg); 1187 1188 ironlake_wait_panel_on(intel_dp); 1189 1190 if (IS_GEN5(dev)) { 1191 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1192 I915_WRITE(pp_ctrl_reg, pp); 1193 POSTING_READ(pp_ctrl_reg); 1194 } 1195} 1196 1197void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1198{ 1199 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1200 struct drm_i915_private *dev_priv = dev->dev_private; 1201 u32 pp; 1202 u32 pp_ctrl_reg; 1203 1204 if (!is_edp(intel_dp)) 1205 return; 1206 1207 DRM_DEBUG_KMS("Turn eDP power off\n"); 1208 1209 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1210 1211 pp = ironlake_get_pp_control(intel_dp); 1212 /* We need to switch off panel power _and_ force vdd, for otherwise some 1213 * panels get very unhappy and cease to work. */ 1214 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1215 1216 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1217 1218 I915_WRITE(pp_ctrl_reg, pp); 1219 POSTING_READ(pp_ctrl_reg); 1220 1221 intel_dp->want_panel_vdd = false; 1222 1223 ironlake_wait_panel_off(intel_dp); 1224} 1225 1226void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1227{ 1228 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1229 struct drm_device *dev = intel_dig_port->base.base.dev; 1230 struct drm_i915_private *dev_priv = dev->dev_private; 1231 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; 1232 u32 pp; 1233 u32 pp_ctrl_reg; 1234 1235 if (!is_edp(intel_dp)) 1236 return; 1237 1238 DRM_DEBUG_KMS("\n"); 1239 /* 1240 * If we enable the backlight right away following a panel power 1241 * on, we may see slight flicker as the panel syncs with the eDP 1242 * link. 
So delay a bit to make sure the image is solid before 1243 * allowing it to appear. 1244 */ 1245 msleep(intel_dp->backlight_on_delay); 1246 pp = ironlake_get_pp_control(intel_dp); 1247 pp |= EDP_BLC_ENABLE; 1248 1249 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1250 1251 I915_WRITE(pp_ctrl_reg, pp); 1252 POSTING_READ(pp_ctrl_reg); 1253 1254 intel_panel_enable_backlight(dev, pipe); 1255} 1256 1257void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1258{ 1259 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1260 struct drm_i915_private *dev_priv = dev->dev_private; 1261 u32 pp; 1262 u32 pp_ctrl_reg; 1263 1264 if (!is_edp(intel_dp)) 1265 return; 1266 1267 intel_panel_disable_backlight(dev); 1268 1269 DRM_DEBUG_KMS("\n"); 1270 pp = ironlake_get_pp_control(intel_dp); 1271 pp &= ~EDP_BLC_ENABLE; 1272 1273 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1274 1275 I915_WRITE(pp_ctrl_reg, pp); 1276 POSTING_READ(pp_ctrl_reg); 1277 msleep(intel_dp->backlight_off_delay); 1278} 1279 1280static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1281{ 1282 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1283 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 1284 struct drm_device *dev = crtc->dev; 1285 struct drm_i915_private *dev_priv = dev->dev_private; 1286 u32 dpa_ctl; 1287 1288 assert_pipe_disabled(dev_priv, 1289 to_intel_crtc(crtc)->pipe); 1290 1291 DRM_DEBUG_KMS("\n"); 1292 dpa_ctl = I915_READ(DP_A); 1293 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); 1294 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 1295 1296 /* We don't adjust intel_dp->DP while tearing down the link, to 1297 * facilitate link retraining (e.g. after hotplug). Hence clear all 1298 * enable bits here to ensure that we don't enable too much. 
*/ 1299 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 1300 intel_dp->DP |= DP_PLL_ENABLE; 1301 I915_WRITE(DP_A, intel_dp->DP); 1302 POSTING_READ(DP_A); 1303 udelay(200); 1304} 1305 1306static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 1307{ 1308 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1309 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 1310 struct drm_device *dev = crtc->dev; 1311 struct drm_i915_private *dev_priv = dev->dev_private; 1312 u32 dpa_ctl; 1313 1314 assert_pipe_disabled(dev_priv, 1315 to_intel_crtc(crtc)->pipe); 1316 1317 dpa_ctl = I915_READ(DP_A); 1318 WARN((dpa_ctl & DP_PLL_ENABLE) == 0, 1319 "dp pll off, should be on\n"); 1320 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); 1321 1322 /* We can't rely on the value tracked for the DP register in 1323 * intel_dp->DP because link_down must not change that (otherwise link 1324 * re-training will fail. */ 1325 dpa_ctl &= ~DP_PLL_ENABLE; 1326 I915_WRITE(DP_A, dpa_ctl); 1327 POSTING_READ(DP_A); 1328 udelay(200); 1329} 1330 1331/* If the sink supports it, try to set the power state appropriately */ 1332void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 1333{ 1334 int ret, i; 1335 1336 /* Should have a valid DPCD by this point */ 1337 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1338 return; 1339 1340 if (mode != DRM_MODE_DPMS_ON) { 1341 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, 1342 DP_SET_POWER_D3); 1343 if (ret != 1) 1344 DRM_DEBUG_DRIVER("failed to write sink power state\n"); 1345 } else { 1346 /* 1347 * When turning on, we need to retry for 1ms to give the sink 1348 * time to wake up. 
1349 */ 1350 for (i = 0; i < 3; i++) { 1351 ret = intel_dp_aux_native_write_1(intel_dp, 1352 DP_SET_POWER, 1353 DP_SET_POWER_D0); 1354 if (ret == 1) 1355 break; 1356 msleep(1); 1357 } 1358 } 1359} 1360 1361static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 1362 enum pipe *pipe) 1363{ 1364 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1365 enum port port = dp_to_dig_port(intel_dp)->port; 1366 struct drm_device *dev = encoder->base.dev; 1367 struct drm_i915_private *dev_priv = dev->dev_private; 1368 u32 tmp = I915_READ(intel_dp->output_reg); 1369 1370 if (!(tmp & DP_PORT_EN)) 1371 return false; 1372 1373 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 1374 *pipe = PORT_TO_PIPE_CPT(tmp); 1375 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) { 1376 *pipe = PORT_TO_PIPE(tmp); 1377 } else { 1378 u32 trans_sel; 1379 u32 trans_dp; 1380 int i; 1381 1382 switch (intel_dp->output_reg) { 1383 case PCH_DP_B: 1384 trans_sel = TRANS_DP_PORT_SEL_B; 1385 break; 1386 case PCH_DP_C: 1387 trans_sel = TRANS_DP_PORT_SEL_C; 1388 break; 1389 case PCH_DP_D: 1390 trans_sel = TRANS_DP_PORT_SEL_D; 1391 break; 1392 default: 1393 return true; 1394 } 1395 1396 for_each_pipe(i) { 1397 trans_dp = I915_READ(TRANS_DP_CTL(i)); 1398 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { 1399 *pipe = i; 1400 return true; 1401 } 1402 } 1403 1404 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", 1405 intel_dp->output_reg); 1406 } 1407 1408 return true; 1409} 1410 1411static void intel_dp_get_config(struct intel_encoder *encoder, 1412 struct intel_crtc_config *pipe_config) 1413{ 1414 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1415 u32 tmp, flags = 0; 1416 struct drm_device *dev = encoder->base.dev; 1417 struct drm_i915_private *dev_priv = dev->dev_private; 1418 enum port port = dp_to_dig_port(intel_dp)->port; 1419 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1420 int dotclock; 1421 1422 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { 
1423 tmp = I915_READ(intel_dp->output_reg); 1424 if (tmp & DP_SYNC_HS_HIGH) 1425 flags |= DRM_MODE_FLAG_PHSYNC; 1426 else 1427 flags |= DRM_MODE_FLAG_NHSYNC; 1428 1429 if (tmp & DP_SYNC_VS_HIGH) 1430 flags |= DRM_MODE_FLAG_PVSYNC; 1431 else 1432 flags |= DRM_MODE_FLAG_NVSYNC; 1433 } else { 1434 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe)); 1435 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH) 1436 flags |= DRM_MODE_FLAG_PHSYNC; 1437 else 1438 flags |= DRM_MODE_FLAG_NHSYNC; 1439 1440 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH) 1441 flags |= DRM_MODE_FLAG_PVSYNC; 1442 else 1443 flags |= DRM_MODE_FLAG_NVSYNC; 1444 } 1445 1446 pipe_config->adjusted_mode.flags |= flags; 1447 1448 pipe_config->has_dp_encoder = true; 1449 1450 intel_dp_get_m_n(crtc, pipe_config); 1451 1452 if (port == PORT_A) { 1453 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) 1454 pipe_config->port_clock = 162000; 1455 else 1456 pipe_config->port_clock = 270000; 1457 } 1458 1459 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 1460 &pipe_config->dp_m_n); 1461 1462 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A) 1463 ironlake_check_encoder_dotclock(pipe_config, dotclock); 1464 1465 pipe_config->adjusted_mode.clock = dotclock; 1466} 1467 1468static bool is_edp_psr(struct intel_dp *intel_dp) 1469{ 1470 return is_edp(intel_dp) && 1471 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; 1472} 1473 1474static bool intel_edp_is_psr_enabled(struct drm_device *dev) 1475{ 1476 struct drm_i915_private *dev_priv = dev->dev_private; 1477 1478 if (!IS_HASWELL(dev)) 1479 return false; 1480 1481 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 1482} 1483 1484static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, 1485 struct edp_vsc_psr *vsc_psr) 1486{ 1487 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1488 struct drm_device *dev = dig_port->base.base.dev; 1489 struct drm_i915_private *dev_priv = dev->dev_private; 1490 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 
1491 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); 1492 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); 1493 uint32_t *data = (uint32_t *) vsc_psr; 1494 unsigned int i; 1495 1496 /* As per BSPec (Pipe Video Data Island Packet), we need to disable 1497 the video DIP being updated before program video DIP data buffer 1498 registers for DIP being updated. */ 1499 I915_WRITE(ctl_reg, 0); 1500 POSTING_READ(ctl_reg); 1501 1502 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { 1503 if (i < sizeof(struct edp_vsc_psr)) 1504 I915_WRITE(data_reg + i, *data++); 1505 else 1506 I915_WRITE(data_reg + i, 0); 1507 } 1508 1509 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); 1510 POSTING_READ(ctl_reg); 1511} 1512 1513static void intel_edp_psr_setup(struct intel_dp *intel_dp) 1514{ 1515 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1516 struct drm_i915_private *dev_priv = dev->dev_private; 1517 struct edp_vsc_psr psr_vsc; 1518 1519 if (intel_dp->psr_setup_done) 1520 return; 1521 1522 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ 1523 memset(&psr_vsc, 0, sizeof(psr_vsc)); 1524 psr_vsc.sdp_header.HB0 = 0; 1525 psr_vsc.sdp_header.HB1 = 0x7; 1526 psr_vsc.sdp_header.HB2 = 0x2; 1527 psr_vsc.sdp_header.HB3 = 0x8; 1528 intel_edp_psr_write_vsc(intel_dp, &psr_vsc); 1529 1530 /* Avoid continuous PSR exit by masking memup and hpd */ 1531 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1532 EDP_PSR_DEBUG_MASK_HPD); 1533 1534 intel_dp->psr_setup_done = true; 1535} 1536 1537static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) 1538{ 1539 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1540 struct drm_i915_private *dev_priv = dev->dev_private; 1541 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); 1542 int precharge = 0x3; 1543 int msg_size = 5; /* Header(4) + Message(1) */ 1544 1545 /* Enable PSR in sink */ 1546 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) 1547 intel_dp_aux_native_write_1(intel_dp, 
DP_PSR_EN_CFG, 1548 DP_PSR_ENABLE & 1549 ~DP_PSR_MAIN_LINK_ACTIVE); 1550 else 1551 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, 1552 DP_PSR_ENABLE | 1553 DP_PSR_MAIN_LINK_ACTIVE); 1554 1555 /* Setup AUX registers */ 1556 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); 1557 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); 1558 I915_WRITE(EDP_PSR_AUX_CTL, 1559 DP_AUX_CH_CTL_TIME_OUT_400us | 1560 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1561 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1562 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); 1563} 1564 1565static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) 1566{ 1567 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1568 struct drm_i915_private *dev_priv = dev->dev_private; 1569 uint32_t max_sleep_time = 0x1f; 1570 uint32_t idle_frames = 1; 1571 uint32_t val = 0x0; 1572 1573 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { 1574 val |= EDP_PSR_LINK_STANDBY; 1575 val |= EDP_PSR_TP2_TP3_TIME_0us; 1576 val |= EDP_PSR_TP1_TIME_0us; 1577 val |= EDP_PSR_SKIP_AUX_EXIT; 1578 } else 1579 val |= EDP_PSR_LINK_DISABLE; 1580 1581 I915_WRITE(EDP_PSR_CTL, val | 1582 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | 1583 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1584 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1585 EDP_PSR_ENABLE); 1586} 1587 1588static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) 1589{ 1590 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1591 struct drm_device *dev = dig_port->base.base.dev; 1592 struct drm_i915_private *dev_priv = dev->dev_private; 1593 struct drm_crtc *crtc = dig_port->base.base.crtc; 1594 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1595 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; 1596 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1597 1598 if (!IS_HASWELL(dev)) { 1599 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1600 
dev_priv->no_psr_reason = PSR_NO_SOURCE; 1601 return false; 1602 } 1603 1604 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1605 (dig_port->port != PORT_A)) { 1606 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1607 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA; 1608 return false; 1609 } 1610 1611 if (!is_edp_psr(intel_dp)) { 1612 DRM_DEBUG_KMS("PSR not supported by this panel\n"); 1613 dev_priv->no_psr_reason = PSR_NO_SINK; 1614 return false; 1615 } 1616 1617 if (!i915_enable_psr) { 1618 DRM_DEBUG_KMS("PSR disable by flag\n"); 1619 dev_priv->no_psr_reason = PSR_MODULE_PARAM; 1620 return false; 1621 } 1622 1623 crtc = dig_port->base.base.crtc; 1624 if (crtc == NULL) { 1625 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1626 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; 1627 return false; 1628 } 1629 1630 intel_crtc = to_intel_crtc(crtc); 1631 if (!intel_crtc->active || !crtc->fb || 1632 !intel_crtc->config.adjusted_mode.clock) { 1633 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1634 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; 1635 return false; 1636 } 1637 1638 obj = to_intel_framebuffer(crtc->fb)->obj; 1639 if (obj->tiling_mode != I915_TILING_X || 1640 obj->fence_reg == I915_FENCE_REG_NONE) { 1641 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1642 dev_priv->no_psr_reason = PSR_NOT_TILED; 1643 return false; 1644 } 1645 1646 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { 1647 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); 1648 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED; 1649 return false; 1650 } 1651 1652 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1653 S3D_ENABLE) { 1654 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1655 dev_priv->no_psr_reason = PSR_S3D_ENABLED; 1656 return false; 1657 } 1658 1659 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 1660 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); 1661 dev_priv->no_psr_reason = 
PSR_INTERLACED_ENABLED; 1662 return false; 1663 } 1664 1665 return true; 1666} 1667 1668static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) 1669{ 1670 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1671 1672 if (!intel_edp_psr_match_conditions(intel_dp) || 1673 intel_edp_is_psr_enabled(dev)) 1674 return; 1675 1676 /* Setup PSR once */ 1677 intel_edp_psr_setup(intel_dp); 1678 1679 /* Enable PSR on the panel */ 1680 intel_edp_psr_enable_sink(intel_dp); 1681 1682 /* Enable PSR on the host */ 1683 intel_edp_psr_enable_source(intel_dp); 1684} 1685 1686void intel_edp_psr_enable(struct intel_dp *intel_dp) 1687{ 1688 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1689 1690 if (intel_edp_psr_match_conditions(intel_dp) && 1691 !intel_edp_is_psr_enabled(dev)) 1692 intel_edp_psr_do_enable(intel_dp); 1693} 1694 1695void intel_edp_psr_disable(struct intel_dp *intel_dp) 1696{ 1697 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1698 struct drm_i915_private *dev_priv = dev->dev_private; 1699 1700 if (!intel_edp_is_psr_enabled(dev)) 1701 return; 1702 1703 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); 1704 1705 /* Wait till PSR is idle */ 1706 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 1707 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1708 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 1709} 1710 1711void intel_edp_psr_update(struct drm_device *dev) 1712{ 1713 struct intel_encoder *encoder; 1714 struct intel_dp *intel_dp = NULL; 1715 1716 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) 1717 if (encoder->type == INTEL_OUTPUT_EDP) { 1718 intel_dp = enc_to_intel_dp(&encoder->base); 1719 1720 if (!is_edp_psr(intel_dp)) 1721 return; 1722 1723 if (!intel_edp_psr_match_conditions(intel_dp)) 1724 intel_edp_psr_disable(intel_dp); 1725 else 1726 if (!intel_edp_is_psr_enabled(dev)) 1727 intel_edp_psr_do_enable(intel_dp); 1728 } 1729} 1730 1731static void intel_disable_dp(struct intel_encoder *encoder) 1732{ 1733 
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1734 enum port port = dp_to_dig_port(intel_dp)->port; 1735 struct drm_device *dev = encoder->base.dev; 1736 1737 /* Make sure the panel is off before trying to change the mode. But also 1738 * ensure that we have vdd while we switch off the panel. */ 1739 ironlake_edp_panel_vdd_on(intel_dp); 1740 ironlake_edp_backlight_off(intel_dp); 1741 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1742 ironlake_edp_panel_off(intel_dp); 1743 1744 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1745 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) 1746 intel_dp_link_down(intel_dp); 1747} 1748 1749static void intel_post_disable_dp(struct intel_encoder *encoder) 1750{ 1751 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1752 enum port port = dp_to_dig_port(intel_dp)->port; 1753 struct drm_device *dev = encoder->base.dev; 1754 1755 if (port == PORT_A || IS_VALLEYVIEW(dev)) { 1756 intel_dp_link_down(intel_dp); 1757 if (!IS_VALLEYVIEW(dev)) 1758 ironlake_edp_pll_off(intel_dp); 1759 } 1760} 1761 1762static void intel_enable_dp(struct intel_encoder *encoder) 1763{ 1764 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1765 struct drm_device *dev = encoder->base.dev; 1766 struct drm_i915_private *dev_priv = dev->dev_private; 1767 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1768 1769 if (WARN_ON(dp_reg & DP_PORT_EN)) 1770 return; 1771 1772 ironlake_edp_panel_vdd_on(intel_dp); 1773 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1774 intel_dp_start_link_train(intel_dp); 1775 ironlake_edp_panel_on(intel_dp); 1776 ironlake_edp_panel_vdd_off(intel_dp, true); 1777 intel_dp_complete_link_train(intel_dp); 1778 intel_dp_stop_link_train(intel_dp); 1779} 1780 1781static void g4x_enable_dp(struct intel_encoder *encoder) 1782{ 1783 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1784 1785 intel_enable_dp(encoder); 1786 ironlake_edp_backlight_on(intel_dp); 1787} 1788 
1789static void vlv_enable_dp(struct intel_encoder *encoder) 1790{ 1791 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1792 1793 ironlake_edp_backlight_on(intel_dp); 1794} 1795 1796static void g4x_pre_enable_dp(struct intel_encoder *encoder) 1797{ 1798 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1799 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1800 1801 if (dport->port == PORT_A) 1802 ironlake_edp_pll_on(intel_dp); 1803} 1804 1805static void vlv_pre_enable_dp(struct intel_encoder *encoder) 1806{ 1807 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1808 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1809 struct drm_device *dev = encoder->base.dev; 1810 struct drm_i915_private *dev_priv = dev->dev_private; 1811 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1812 int port = vlv_dport_to_channel(dport); 1813 int pipe = intel_crtc->pipe; 1814 struct edp_power_seq power_seq; 1815 u32 val; 1816 1817 mutex_lock(&dev_priv->dpio_lock); 1818 1819 val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port)); 1820 val = 0; 1821 if (pipe) 1822 val |= (1<<21); 1823 else 1824 val &= ~(1<<21); 1825 val |= 0x001000c4; 1826 vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val); 1827 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018); 1828 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888); 1829 1830 mutex_unlock(&dev_priv->dpio_lock); 1831 1832 /* init power sequencer on this pipe and port */ 1833 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 1834 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 1835 &power_seq); 1836 1837 intel_enable_dp(encoder); 1838 1839 vlv_wait_port_ready(dev_priv, port); 1840} 1841 1842static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) 1843{ 1844 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1845 struct drm_device *dev = encoder->base.dev; 1846 struct 
drm_i915_private *dev_priv = dev->dev_private; 1847 struct intel_crtc *intel_crtc = 1848 to_intel_crtc(encoder->base.crtc); 1849 int port = vlv_dport_to_channel(dport); 1850 int pipe = intel_crtc->pipe; 1851 1852 /* Program Tx lane resets to default */ 1853 mutex_lock(&dev_priv->dpio_lock); 1854 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 1855 DPIO_PCS_TX_LANE2_RESET | 1856 DPIO_PCS_TX_LANE1_RESET); 1857 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 1858 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1859 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1860 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1861 DPIO_PCS_CLK_SOFT_RESET); 1862 1863 /* Fix up inter-pair skew failure */ 1864 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00); 1865 vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500); 1866 vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000); 1867 mutex_unlock(&dev_priv->dpio_lock); 1868} 1869 1870/* 1871 * Native read with retry for link status and receiver capability reads for 1872 * cases where the sink may still be asleep. 1873 */ 1874static bool 1875intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1876 uint8_t *recv, int recv_bytes) 1877{ 1878 int ret, i; 1879 1880 /* 1881 * Sinks are *supposed* to come up within 1ms from an off state, 1882 * but we're also supposed to retry 3 times per the spec. 
1883 */ 1884 for (i = 0; i < 3; i++) { 1885 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1886 recv_bytes); 1887 if (ret == recv_bytes) 1888 return true; 1889 msleep(1); 1890 } 1891 1892 return false; 1893} 1894 1895/* 1896 * Fetch AUX CH registers 0x202 - 0x207 which contain 1897 * link status information 1898 */ 1899static bool 1900intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1901{ 1902 return intel_dp_aux_native_read_retry(intel_dp, 1903 DP_LANE0_1_STATUS, 1904 link_status, 1905 DP_LINK_STATUS_SIZE); 1906} 1907 1908#if 0 1909static char *voltage_names[] = { 1910 "0.4V", "0.6V", "0.8V", "1.2V" 1911}; 1912static char *pre_emph_names[] = { 1913 "0dB", "3.5dB", "6dB", "9.5dB" 1914}; 1915static char *link_train_names[] = { 1916 "pattern 1", "pattern 2", "idle", "off" 1917}; 1918#endif 1919 1920/* 1921 * These are source-specific values; current Intel hardware supports 1922 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1923 */ 1924 1925static uint8_t 1926intel_dp_voltage_max(struct intel_dp *intel_dp) 1927{ 1928 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1929 enum port port = dp_to_dig_port(intel_dp)->port; 1930 1931 if (IS_VALLEYVIEW(dev)) 1932 return DP_TRAIN_VOLTAGE_SWING_1200; 1933 else if (IS_GEN7(dev) && port == PORT_A) 1934 return DP_TRAIN_VOLTAGE_SWING_800; 1935 else if (HAS_PCH_CPT(dev) && port != PORT_A) 1936 return DP_TRAIN_VOLTAGE_SWING_1200; 1937 else 1938 return DP_TRAIN_VOLTAGE_SWING_800; 1939} 1940 1941static uint8_t 1942intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1943{ 1944 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1945 enum port port = dp_to_dig_port(intel_dp)->port; 1946 1947 if (HAS_DDI(dev)) { 1948 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1949 case DP_TRAIN_VOLTAGE_SWING_400: 1950 return DP_TRAIN_PRE_EMPHASIS_9_5; 1951 case DP_TRAIN_VOLTAGE_SWING_600: 1952 return DP_TRAIN_PRE_EMPHASIS_6; 1953 case 
DP_TRAIN_VOLTAGE_SWING_800: 1954 return DP_TRAIN_PRE_EMPHASIS_3_5; 1955 case DP_TRAIN_VOLTAGE_SWING_1200: 1956 default: 1957 return DP_TRAIN_PRE_EMPHASIS_0; 1958 } 1959 } else if (IS_VALLEYVIEW(dev)) { 1960 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1961 case DP_TRAIN_VOLTAGE_SWING_400: 1962 return DP_TRAIN_PRE_EMPHASIS_9_5; 1963 case DP_TRAIN_VOLTAGE_SWING_600: 1964 return DP_TRAIN_PRE_EMPHASIS_6; 1965 case DP_TRAIN_VOLTAGE_SWING_800: 1966 return DP_TRAIN_PRE_EMPHASIS_3_5; 1967 case DP_TRAIN_VOLTAGE_SWING_1200: 1968 default: 1969 return DP_TRAIN_PRE_EMPHASIS_0; 1970 } 1971 } else if (IS_GEN7(dev) && port == PORT_A) { 1972 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1973 case DP_TRAIN_VOLTAGE_SWING_400: 1974 return DP_TRAIN_PRE_EMPHASIS_6; 1975 case DP_TRAIN_VOLTAGE_SWING_600: 1976 case DP_TRAIN_VOLTAGE_SWING_800: 1977 return DP_TRAIN_PRE_EMPHASIS_3_5; 1978 default: 1979 return DP_TRAIN_PRE_EMPHASIS_0; 1980 } 1981 } else { 1982 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1983 case DP_TRAIN_VOLTAGE_SWING_400: 1984 return DP_TRAIN_PRE_EMPHASIS_6; 1985 case DP_TRAIN_VOLTAGE_SWING_600: 1986 return DP_TRAIN_PRE_EMPHASIS_6; 1987 case DP_TRAIN_VOLTAGE_SWING_800: 1988 return DP_TRAIN_PRE_EMPHASIS_3_5; 1989 case DP_TRAIN_VOLTAGE_SWING_1200: 1990 default: 1991 return DP_TRAIN_PRE_EMPHASIS_0; 1992 } 1993 } 1994} 1995 1996static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) 1997{ 1998 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1999 struct drm_i915_private *dev_priv = dev->dev_private; 2000 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2001 struct intel_crtc *intel_crtc = 2002 to_intel_crtc(dport->base.base.crtc); 2003 unsigned long demph_reg_value, preemph_reg_value, 2004 uniqtranscale_reg_value; 2005 uint8_t train_set = intel_dp->train_set[0]; 2006 int port = vlv_dport_to_channel(dport); 2007 int pipe = intel_crtc->pipe; 2008 2009 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2010 case 
DP_TRAIN_PRE_EMPHASIS_0: 2011 preemph_reg_value = 0x0004000; 2012 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2013 case DP_TRAIN_VOLTAGE_SWING_400: 2014 demph_reg_value = 0x2B405555; 2015 uniqtranscale_reg_value = 0x552AB83A; 2016 break; 2017 case DP_TRAIN_VOLTAGE_SWING_600: 2018 demph_reg_value = 0x2B404040; 2019 uniqtranscale_reg_value = 0x5548B83A; 2020 break; 2021 case DP_TRAIN_VOLTAGE_SWING_800: 2022 demph_reg_value = 0x2B245555; 2023 uniqtranscale_reg_value = 0x5560B83A; 2024 break; 2025 case DP_TRAIN_VOLTAGE_SWING_1200: 2026 demph_reg_value = 0x2B405555; 2027 uniqtranscale_reg_value = 0x5598DA3A; 2028 break; 2029 default: 2030 return 0; 2031 } 2032 break; 2033 case DP_TRAIN_PRE_EMPHASIS_3_5: 2034 preemph_reg_value = 0x0002000; 2035 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2036 case DP_TRAIN_VOLTAGE_SWING_400: 2037 demph_reg_value = 0x2B404040; 2038 uniqtranscale_reg_value = 0x5552B83A; 2039 break; 2040 case DP_TRAIN_VOLTAGE_SWING_600: 2041 demph_reg_value = 0x2B404848; 2042 uniqtranscale_reg_value = 0x5580B83A; 2043 break; 2044 case DP_TRAIN_VOLTAGE_SWING_800: 2045 demph_reg_value = 0x2B404040; 2046 uniqtranscale_reg_value = 0x55ADDA3A; 2047 break; 2048 default: 2049 return 0; 2050 } 2051 break; 2052 case DP_TRAIN_PRE_EMPHASIS_6: 2053 preemph_reg_value = 0x0000000; 2054 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2055 case DP_TRAIN_VOLTAGE_SWING_400: 2056 demph_reg_value = 0x2B305555; 2057 uniqtranscale_reg_value = 0x5570B83A; 2058 break; 2059 case DP_TRAIN_VOLTAGE_SWING_600: 2060 demph_reg_value = 0x2B2B4040; 2061 uniqtranscale_reg_value = 0x55ADDA3A; 2062 break; 2063 default: 2064 return 0; 2065 } 2066 break; 2067 case DP_TRAIN_PRE_EMPHASIS_9_5: 2068 preemph_reg_value = 0x0006000; 2069 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2070 case DP_TRAIN_VOLTAGE_SWING_400: 2071 demph_reg_value = 0x1B405555; 2072 uniqtranscale_reg_value = 0x55ADDA3A; 2073 break; 2074 default: 2075 return 0; 2076 } 2077 break; 2078 default: 2079 
return 0; 2080 } 2081 2082 mutex_lock(&dev_priv->dpio_lock); 2083 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000); 2084 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value); 2085 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port), 2086 uniqtranscale_reg_value); 2087 vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040); 2088 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000); 2089 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); 2090 vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000); 2091 mutex_unlock(&dev_priv->dpio_lock); 2092 2093 return 0; 2094} 2095 2096static void 2097intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 2098{ 2099 uint8_t v = 0; 2100 uint8_t p = 0; 2101 int lane; 2102 uint8_t voltage_max; 2103 uint8_t preemph_max; 2104 2105 for (lane = 0; lane < intel_dp->lane_count; lane++) { 2106 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); 2107 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); 2108 2109 if (this_v > v) 2110 v = this_v; 2111 if (this_p > p) 2112 p = this_p; 2113 } 2114 2115 voltage_max = intel_dp_voltage_max(intel_dp); 2116 if (v >= voltage_max) 2117 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 2118 2119 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 2120 if (p >= preemph_max) 2121 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 2122 2123 for (lane = 0; lane < 4; lane++) 2124 intel_dp->train_set[lane] = v | p; 2125} 2126 2127static uint32_t 2128intel_gen4_signal_levels(uint8_t train_set) 2129{ 2130 uint32_t signal_levels = 0; 2131 2132 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2133 case DP_TRAIN_VOLTAGE_SWING_400: 2134 default: 2135 signal_levels |= DP_VOLTAGE_0_4; 2136 break; 2137 case DP_TRAIN_VOLTAGE_SWING_600: 2138 signal_levels |= DP_VOLTAGE_0_6; 2139 break; 2140 case DP_TRAIN_VOLTAGE_SWING_800: 2141 
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	/* Unknown pre-emphasis values fall back to 0 dB. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	/*
	 * SNB eDP supports only a subset of swing/pre-emphasis pairs, so
	 * several requested combinations share one hardware encoding;
	 * unsupported combinations fall back to 400/600mV, 0dB.
	 */
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	/*
	 * IVB eDP (port A) supports each swing only up to a limited
	 * pre-emphasis; anything else falls back to 500mV/0dB.
	 */
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	/* Map to the DDI buffer translation index for HSW. */
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}

/* Properly updates "DP" with the correct signal levels. */
/*
 * Pick the platform-specific signal-level encoder for train_set[0] and
 * merge the result into the port register value *DP under the matching
 * mask. On VLV the levels are programmed via DPIO instead, so the mask
 * is 0 and *DP is left untouched.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_VALLEYVIEW(dev)) {
		/* Side effect only: writes the PHY via DPIO. */
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}

/*
 * Program the requested training pattern into the hardware (DP_TP_CTL
 * on DDI platforms, the port register otherwise) and mirror it to the
 * sink via DPCD; also pushes the per-lane train_set while training.
 * Returns false if the AUX write of the lane settings comes up short.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev =
				intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;

	if (HAS_DDI(dev)) {
		/* DDI uses DP_TP_CTL rather than the port register. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT-style training bits in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No TPS3 here; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No TPS3 here; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |=
				DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Tell the sink which pattern (and scrambling state) to expect. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	/* While actively training, also push the per-lane drive settings. */
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

/*
 * Put a DDI port's DP_TP_CTL into idle-pattern transmission mode and,
 * except on port A, wait for the hardware to report the idle pattern
 * has been sent. No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}

/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training: enable the port, transmit
 * training pattern 1 (scrambling disabled), and iterate on the sink's
 * requested voltage-swing/pre-emphasis adjustments until clock recovery
 * is reported OK or the retry budgets are exhausted. The resulting port
 * register value is stored back into intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from zero, up to 5x. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}

/*
 * Channel-equalization phase of DP link training: transmit training
 * pattern 2 and iterate on the sink's adjust requests until channel EQ
 * is reported OK. If clock recovery is lost, or EQ fails 5 times in a
 * row, restart from the clock-recovery phase; give up entirely after 5
 * such restarts and take the link down.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new
		   intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}

/* Switch the link from a training pattern back to normal pixel output. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}

/*
 * Take the DP link down: idle the training pattern, apply the IBX
 * transcoder-B workaround, then disable the port and wait out the panel
 * power-down delay. Intentionally a no-op on DDI platforms (see below).
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop to the idle training pattern (CPT vs. non-CPT bits). */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/*
 * Read and cache the sink's DPCD receiver capabilities, probe for PSR
 * support, and fetch per-port downstream info when a branch device is
 * present. Returns false when the AUX read fails, the DPCD is absent,
 * or the downstream-port status fetch fails.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
				       intel_dp->psr_dpcd,
				       sizeof(intel_dp->psr_dpcd));
	if (is_edp_psr(intel_dp))
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/*
 * If the sink advertises OUI support, read and log the sink and branch
 * OUIs (debug aid only). VDD is forced on around the AUX reads.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/* Read the sink's service IRQ vector; false if the AUX read fails. */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

/* Automated-test requests are not implemented; NAK them. */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

/*
 * Hot-plug/poll handler: verify the sink is still present and the link
 * still trained; service sink IRQs and retrain if channel EQ was lost.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain the full sequence if channel equalization was lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connection status from the DPCD: connected directly, or — via
 * a branch device — from SINK_COUNT (HPD-aware ports), a gentle DDC
 * probe, or "unknown" for unreliable downstream port types.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ?
		       connector_status_connected
		       : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/*
 * PCH-split connector detection: eDP is assumed present unless the lid
 * logic says otherwise; external DP consults the digital-port live
 * status first and then the DPCD.
 */
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/*
 * G4x connector detection: check the per-port hotplug live-status bit
 * before bothering with DPCD reads.
 */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	/* Can't disconnect eDP, but you can close the lid...
	 */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	switch (intel_dig_port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/*
 * Return the connector's EDID: a kmemdup'd copy of the cached EDID if
 * one exists (NULL if the cache holds an error), otherwise a fresh DDC
 * read. Caller owns (and must kfree) the returned EDID.
 */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		struct edid *edid;
		int size;

		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
		if (!edid)
			return NULL;

		return edid;
	}

	return drm_get_edid(connector, adapter);
}

/*
 * Populate the connector's mode list from the cached EDID when present,
 * otherwise via a DDC probe. Returns the number of modes added.
 */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return 0;

		return intel_connector_update_modes(connector,
						    intel_connector->edid);
	}

	return intel_ddc_get_modes(connector, adapter);
}

/*
 * drm_connector .detect hook: dispatch to the platform detect routine,
 * then probe the OUI and audio capability of a connected sink.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp
			*intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector));

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Audio: honor the force-audio property, else ask the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	return connector_status_connected;
}

/*
 * drm_connector .get_modes hook: EDID modes first; for eDP panels with
 * no EDID, fall back to the fixed panel mode.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret)
		return ret;

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	/* Ask the sink's EDID whether it has an audio sink. */
	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

/*
 * drm_connector .set_property hook: handles force-audio, broadcast-RGB
 * and (eDP only) panel scaling-mode properties; any accepted change
 * triggers a mode restore on the attached crtc.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the mode restore when nothing actually changed. */
		if (old_auto ==
		    intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the property change takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}

/*
 * drm_connector .destroy hook: free the cached EDID, tear down the eDP
 * panel state, and release the connector.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already.
	 */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

/*
 * drm_encoder .destroy hook: remove the AUX i2c adapter, cancel any
 * pending VDD-off work (with VDD forced off synchronously under the
 * mode_config lock) and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}

/* Connector/encoder vtables wiring the hooks defined above into drm. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

/* Hot-plug callback: just revalidate the link. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;

	for_each_encoder_on_crtc(dev, crtc,
intel_encoder) { 3172 intel_dp = enc_to_intel_dp(&intel_encoder->base); 3173 3174 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 3175 intel_encoder->type == INTEL_OUTPUT_EDP) 3176 return intel_dp->output_reg; 3177 } 3178 3179 return -1; 3180} 3181 3182/* check the VBT to see whether the eDP is on DP-D port */ 3183bool intel_dpd_is_edp(struct drm_device *dev) 3184{ 3185 struct drm_i915_private *dev_priv = dev->dev_private; 3186 struct child_device_config *p_child; 3187 int i; 3188 3189 if (!dev_priv->vbt.child_dev_num) 3190 return false; 3191 3192 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3193 p_child = dev_priv->vbt.child_dev + i; 3194 3195 if (p_child->dvo_port == PORT_IDPD && 3196 p_child->device_type == DEVICE_TYPE_eDP) 3197 return true; 3198 } 3199 return false; 3200} 3201 3202static void 3203intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 3204{ 3205 struct intel_connector *intel_connector = to_intel_connector(connector); 3206 3207 intel_attach_force_audio_property(connector); 3208 intel_attach_broadcast_rgb_property(connector); 3209 intel_dp->color_range_auto = true; 3210 3211 if (is_edp(intel_dp)) { 3212 drm_mode_create_scaling_mode_property(connector->dev); 3213 drm_object_attach_property( 3214 &connector->base, 3215 connector->dev->mode_config.scaling_mode_property, 3216 DRM_MODE_SCALE_ASPECT); 3217 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 3218 } 3219} 3220 3221static void 3222intel_dp_init_panel_power_sequencer(struct drm_device *dev, 3223 struct intel_dp *intel_dp, 3224 struct edp_power_seq *out) 3225{ 3226 struct drm_i915_private *dev_priv = dev->dev_private; 3227 struct edp_power_seq cur, vbt, spec, final; 3228 u32 pp_on, pp_off, pp_div, pp; 3229 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; 3230 3231 if (HAS_PCH_SPLIT(dev)) { 3232 pp_ctrl_reg = PCH_PP_CONTROL; 3233 pp_on_reg = PCH_PP_ON_DELAYS; 3234 pp_off_reg = PCH_PP_OFF_DELAYS; 3235 pp_div_reg = PCH_PP_DIVISOR; 3236 } 
else { 3237 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 3238 3239 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 3240 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); 3241 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); 3242 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 3243 } 3244 3245 /* Workaround: Need to write PP_CONTROL with the unlock key as 3246 * the very first thing. */ 3247 pp = ironlake_get_pp_control(intel_dp); 3248 I915_WRITE(pp_ctrl_reg, pp); 3249 3250 pp_on = I915_READ(pp_on_reg); 3251 pp_off = I915_READ(pp_off_reg); 3252 pp_div = I915_READ(pp_div_reg); 3253 3254 /* Pull timing values out of registers */ 3255 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 3256 PANEL_POWER_UP_DELAY_SHIFT; 3257 3258 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 3259 PANEL_LIGHT_ON_DELAY_SHIFT; 3260 3261 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 3262 PANEL_LIGHT_OFF_DELAY_SHIFT; 3263 3264 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 3265 PANEL_POWER_DOWN_DELAY_SHIFT; 3266 3267 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 3268 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 3269 3270 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 3271 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 3272 3273 vbt = dev_priv->vbt.edp_pps; 3274 3275 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 3276 * our hw here, which are all in 100usec. */ 3277 spec.t1_t3 = 210 * 10; 3278 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 3279 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 3280 spec.t10 = 500 * 10; 3281 /* This one is special and actually in units of 100ms, but zero 3282 * based in the hw (so we need to add 100 ms). But the sw vbt 3283 * table multiplies it with 1000 to make it in units of 100usec, 3284 * too. 
*/ 3285 spec.t11_t12 = (510 + 100) * 10; 3286 3287 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 3288 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); 3289 3290 /* Use the max of the register settings and vbt. If both are 3291 * unset, fall back to the spec limits. */ 3292#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ 3293 spec.field : \ 3294 max(cur.field, vbt.field)) 3295 assign_final(t1_t3); 3296 assign_final(t8); 3297 assign_final(t9); 3298 assign_final(t10); 3299 assign_final(t11_t12); 3300#undef assign_final 3301 3302#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) 3303 intel_dp->panel_power_up_delay = get_delay(t1_t3); 3304 intel_dp->backlight_on_delay = get_delay(t8); 3305 intel_dp->backlight_off_delay = get_delay(t9); 3306 intel_dp->panel_power_down_delay = get_delay(t10); 3307 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 3308#undef get_delay 3309 3310 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 3311 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 3312 intel_dp->panel_power_cycle_delay); 3313 3314 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 3315 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 3316 3317 if (out) 3318 *out = final; 3319} 3320 3321static void 3322intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 3323 struct intel_dp *intel_dp, 3324 struct edp_power_seq *seq) 3325{ 3326 struct drm_i915_private *dev_priv = dev->dev_private; 3327 u32 pp_on, pp_off, pp_div, port_sel = 0; 3328 int div = HAS_PCH_SPLIT(dev) ? 
intel_pch_rawclk(dev) : intel_hrawclk(dev); 3329 int pp_on_reg, pp_off_reg, pp_div_reg; 3330 3331 if (HAS_PCH_SPLIT(dev)) { 3332 pp_on_reg = PCH_PP_ON_DELAYS; 3333 pp_off_reg = PCH_PP_OFF_DELAYS; 3334 pp_div_reg = PCH_PP_DIVISOR; 3335 } else { 3336 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 3337 3338 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); 3339 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); 3340 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 3341 } 3342 3343 /* And finally store the new values in the power sequencer. */ 3344 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 3345 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 3346 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 3347 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 3348 /* Compute the divisor for the pp clock, simply match the Bspec 3349 * formula. */ 3350 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; 3351 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) 3352 << PANEL_POWER_CYCLE_DELAY_SHIFT); 3353 3354 /* Haswell doesn't have any port selection bits for the panel 3355 * power sequencer any more. 
*/ 3356 if (IS_VALLEYVIEW(dev)) { 3357 if (dp_to_dig_port(intel_dp)->port == PORT_B) 3358 port_sel = PANEL_PORT_SELECT_DPB_VLV; 3359 else 3360 port_sel = PANEL_PORT_SELECT_DPC_VLV; 3361 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 3362 if (dp_to_dig_port(intel_dp)->port == PORT_A) 3363 port_sel = PANEL_PORT_SELECT_DPA; 3364 else 3365 port_sel = PANEL_PORT_SELECT_DPD; 3366 } 3367 3368 pp_on |= port_sel; 3369 3370 I915_WRITE(pp_on_reg, pp_on); 3371 I915_WRITE(pp_off_reg, pp_off); 3372 I915_WRITE(pp_div_reg, pp_div); 3373 3374 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 3375 I915_READ(pp_on_reg), 3376 I915_READ(pp_off_reg), 3377 I915_READ(pp_div_reg)); 3378} 3379 3380static bool intel_edp_init_connector(struct intel_dp *intel_dp, 3381 struct intel_connector *intel_connector) 3382{ 3383 struct drm_connector *connector = &intel_connector->base; 3384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3385 struct drm_device *dev = intel_dig_port->base.base.dev; 3386 struct drm_i915_private *dev_priv = dev->dev_private; 3387 struct drm_display_mode *fixed_mode = NULL; 3388 struct edp_power_seq power_seq = { 0 }; 3389 bool has_dpcd; 3390 struct drm_display_mode *scan; 3391 struct edid *edid; 3392 3393 if (!is_edp(intel_dp)) 3394 return true; 3395 3396 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 3397 3398 /* Cache DPCD and EDID for edp. */ 3399 ironlake_edp_panel_vdd_on(intel_dp); 3400 has_dpcd = intel_dp_get_dpcd(intel_dp); 3401 ironlake_edp_panel_vdd_off(intel_dp, false); 3402 3403 if (has_dpcd) { 3404 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3405 dev_priv->no_aux_handshake = 3406 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 3407 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 3408 } else { 3409 /* if this fails, presume the device is a ghost */ 3410 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 3411 return false; 3412 } 3413 3414 /* We now know it's not a ghost, init power sequence regs. 
*/ 3415 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3416 &power_seq); 3417 3418 ironlake_edp_panel_vdd_on(intel_dp); 3419 edid = drm_get_edid(connector, &intel_dp->adapter); 3420 if (edid) { 3421 if (drm_add_edid_modes(connector, edid)) { 3422 drm_mode_connector_update_edid_property(connector, 3423 edid); 3424 drm_edid_to_eld(connector, edid); 3425 } else { 3426 kfree(edid); 3427 edid = ERR_PTR(-EINVAL); 3428 } 3429 } else { 3430 edid = ERR_PTR(-ENOENT); 3431 } 3432 intel_connector->edid = edid; 3433 3434 /* prefer fixed mode from EDID if available */ 3435 list_for_each_entry(scan, &connector->probed_modes, head) { 3436 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 3437 fixed_mode = drm_mode_duplicate(dev, scan); 3438 break; 3439 } 3440 } 3441 3442 /* fallback to VBT if available for eDP */ 3443 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 3444 fixed_mode = drm_mode_duplicate(dev, 3445 dev_priv->vbt.lfp_lvds_vbt_mode); 3446 if (fixed_mode) 3447 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3448 } 3449 3450 ironlake_edp_panel_vdd_off(intel_dp, false); 3451 3452 intel_panel_init(&intel_connector->panel, fixed_mode); 3453 intel_panel_setup_backlight(connector); 3454 3455 return true; 3456} 3457 3458bool 3459intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 3460 struct intel_connector *intel_connector) 3461{ 3462 struct drm_connector *connector = &intel_connector->base; 3463 struct intel_dp *intel_dp = &intel_dig_port->dp; 3464 struct intel_encoder *intel_encoder = &intel_dig_port->base; 3465 struct drm_device *dev = intel_encoder->base.dev; 3466 struct drm_i915_private *dev_priv = dev->dev_private; 3467 enum port port = intel_dig_port->port; 3468 const char *name = NULL; 3469 int type, error; 3470 3471 /* Preserve the current hw state. 
*/ 3472 intel_dp->DP = I915_READ(intel_dp->output_reg); 3473 intel_dp->attached_connector = intel_connector; 3474 3475 type = DRM_MODE_CONNECTOR_DisplayPort; 3476 /* 3477 * FIXME : We need to initialize built-in panels before external panels. 3478 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup 3479 */ 3480 switch (port) { 3481 case PORT_A: 3482 type = DRM_MODE_CONNECTOR_eDP; 3483 break; 3484 case PORT_C: 3485 if (IS_VALLEYVIEW(dev)) 3486 type = DRM_MODE_CONNECTOR_eDP; 3487 break; 3488 case PORT_D: 3489 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev)) 3490 type = DRM_MODE_CONNECTOR_eDP; 3491 break; 3492 default: /* silence GCC warning */ 3493 break; 3494 } 3495 3496 /* 3497 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 3498 * for DP the encoder type can be set by the caller to 3499 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 3500 */ 3501 if (type == DRM_MODE_CONNECTOR_eDP) 3502 intel_encoder->type = INTEL_OUTPUT_EDP; 3503 3504 DRM_DEBUG_KMS("Adding %s connector on port %c\n", 3505 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 3506 port_name(port)); 3507 3508 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 3509 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 3510 3511 connector->interlace_allowed = true; 3512 connector->doublescan_allowed = 0; 3513 3514 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 3515 ironlake_panel_vdd_work); 3516 3517 intel_connector_attach_encoder(intel_connector, intel_encoder); 3518 drm_sysfs_connector_add(connector); 3519 3520 if (HAS_DDI(dev)) 3521 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 3522 else 3523 intel_connector->get_hw_state = intel_connector_get_hw_state; 3524 3525 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 3526 if (HAS_DDI(dev)) { 3527 switch (intel_dig_port->port) { 3528 case PORT_A: 3529 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; 3530 break; 3531 case PORT_B: 3532 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; 3533 break; 3534 case PORT_C: 3535 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; 3536 break; 3537 case PORT_D: 3538 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; 3539 break; 3540 default: 3541 BUG(); 3542 } 3543 } 3544 3545 /* Set up the DDC bus. 
*/ 3546 switch (port) { 3547 case PORT_A: 3548 intel_encoder->hpd_pin = HPD_PORT_A; 3549 name = "DPDDC-A"; 3550 break; 3551 case PORT_B: 3552 intel_encoder->hpd_pin = HPD_PORT_B; 3553 name = "DPDDC-B"; 3554 break; 3555 case PORT_C: 3556 intel_encoder->hpd_pin = HPD_PORT_C; 3557 name = "DPDDC-C"; 3558 break; 3559 case PORT_D: 3560 intel_encoder->hpd_pin = HPD_PORT_D; 3561 name = "DPDDC-D"; 3562 break; 3563 default: 3564 BUG(); 3565 } 3566 3567 error = intel_dp_i2c_init(intel_dp, intel_connector, name); 3568 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", 3569 error, port_name(port)); 3570 3571 intel_dp->psr_setup_done = false; 3572 3573 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 3574 i2c_del_adapter(&intel_dp->adapter); 3575 if (is_edp(intel_dp)) { 3576 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3577 mutex_lock(&dev->mode_config.mutex); 3578 ironlake_panel_vdd_off_sync(intel_dp); 3579 mutex_unlock(&dev->mode_config.mutex); 3580 } 3581 drm_sysfs_connector_remove(connector); 3582 drm_connector_cleanup(connector); 3583 return false; 3584 } 3585 3586 intel_dp_add_properties(intel_dp, connector); 3587 3588 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 3589 * 0xd. Failure to do so will result in spurious interrupts being 3590 * generated on the port when a cable is not attached. 
3591 */ 3592 if (IS_G4X(dev) && !IS_GM45(dev)) { 3593 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 3594 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 3595 } 3596 3597 return true; 3598} 3599 3600void 3601intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 3602{ 3603 struct intel_digital_port *intel_dig_port; 3604 struct intel_encoder *intel_encoder; 3605 struct drm_encoder *encoder; 3606 struct intel_connector *intel_connector; 3607 3608 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 3609 if (!intel_dig_port) 3610 return; 3611 3612 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 3613 if (!intel_connector) { 3614 kfree(intel_dig_port); 3615 return; 3616 } 3617 3618 intel_encoder = &intel_dig_port->base; 3619 encoder = &intel_encoder->base; 3620 3621 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 3622 DRM_MODE_ENCODER_TMDS); 3623 3624 intel_encoder->compute_config = intel_dp_compute_config; 3625 intel_encoder->mode_set = intel_dp_mode_set; 3626 intel_encoder->disable = intel_disable_dp; 3627 intel_encoder->post_disable = intel_post_disable_dp; 3628 intel_encoder->get_hw_state = intel_dp_get_hw_state; 3629 intel_encoder->get_config = intel_dp_get_config; 3630 if (IS_VALLEYVIEW(dev)) { 3631 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 3632 intel_encoder->pre_enable = vlv_pre_enable_dp; 3633 intel_encoder->enable = vlv_enable_dp; 3634 } else { 3635 intel_encoder->pre_enable = g4x_pre_enable_dp; 3636 intel_encoder->enable = g4x_enable_dp; 3637 } 3638 3639 intel_dig_port->port = port; 3640 intel_dig_port->dp.output_reg = output_reg; 3641 3642 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 3643 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 3644 intel_encoder->cloneable = false; 3645 intel_encoder->hot_plug = intel_dp_hot_plug; 3646 3647 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { 3648 drm_encoder_cleanup(encoder); 3649 kfree(intel_dig_port); 
3650 kfree(intel_connector); 3651 } 3652} 3653