intel_dp.c revision 5e69f97fb39ea660075e6b65a1de33247b53f9d4
1/* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28#include <linux/i2c.h> 29#include <linux/slab.h> 30#include <linux/export.h> 31#include <drm/drmP.h> 32#include <drm/drm_crtc.h> 33#include <drm/drm_crtc_helper.h> 34#include <drm/drm_edid.h> 35#include "intel_drv.h" 36#include <drm/i915_drm.h> 37#include "i915_drv.h" 38 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40 41struct dp_link_dpll { 42 int link_bw; 43 struct dpll dpll; 44}; 45 46static const struct dp_link_dpll gen4_dpll[] = { 47 { DP_LINK_BW_1_62, 48 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, 49 { DP_LINK_BW_2_7, 50 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } 51}; 52 53static const struct dp_link_dpll pch_dpll[] = { 54 { DP_LINK_BW_1_62, 55 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, 56 { DP_LINK_BW_2_7, 57 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } 58}; 59 60static const struct dp_link_dpll vlv_dpll[] = { 61 { DP_LINK_BW_1_62, 62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 5, .m2 = 3 } }, 63 { DP_LINK_BW_2_7, 64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 65}; 66 67/** 68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 69 * @intel_dp: DP struct 70 * 71 * If a CPU or PCH DP output is attached to an eDP panel, this function 72 * will return true, and false otherwise. 
73 */ 74static bool is_edp(struct intel_dp *intel_dp) 75{ 76 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 77 78 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 79} 80 81static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) 82{ 83 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 84 85 return intel_dig_port->base.base.dev; 86} 87 88static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 89{ 90 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 91} 92 93static void intel_dp_link_down(struct intel_dp *intel_dp); 94 95static int 96intel_dp_max_link_bw(struct intel_dp *intel_dp) 97{ 98 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 99 100 switch (max_link_bw) { 101 case DP_LINK_BW_1_62: 102 case DP_LINK_BW_2_7: 103 break; 104 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 105 max_link_bw = DP_LINK_BW_2_7; 106 break; 107 default: 108 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", 109 max_link_bw); 110 max_link_bw = DP_LINK_BW_1_62; 111 break; 112 } 113 return max_link_bw; 114} 115 116/* 117 * The units on the numbers in the next two are... bizarre. Examples will 118 * make it clearer; this one parallels an example in the eDP spec. 119 * 120 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 121 * 122 * 270000 * 1 * 8 / 10 == 216000 123 * 124 * The actual data capacity of that configuration is 2.16Gbit/s, so the 125 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 126 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 127 * 119000. At 18bpp that's 2142000 kilobits per second. 128 * 129 * Thus the strange-looking division by 10 in intel_dp_link_required, to 130 * get the result in decakilobits instead of kilobits. 
131 */ 132 133static int 134intel_dp_link_required(int pixel_clock, int bpp) 135{ 136 return (pixel_clock * bpp + 9) / 10; 137} 138 139static int 140intel_dp_max_data_rate(int max_link_clock, int max_lanes) 141{ 142 return (max_link_clock * max_lanes * 8) / 10; 143} 144 145static int 146intel_dp_mode_valid(struct drm_connector *connector, 147 struct drm_display_mode *mode) 148{ 149 struct intel_dp *intel_dp = intel_attached_dp(connector); 150 struct intel_connector *intel_connector = to_intel_connector(connector); 151 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 152 int target_clock = mode->clock; 153 int max_rate, mode_rate, max_lanes, max_link_clock; 154 155 if (is_edp(intel_dp) && fixed_mode) { 156 if (mode->hdisplay > fixed_mode->hdisplay) 157 return MODE_PANEL; 158 159 if (mode->vdisplay > fixed_mode->vdisplay) 160 return MODE_PANEL; 161 162 target_clock = fixed_mode->clock; 163 } 164 165 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 166 max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 167 168 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 169 mode_rate = intel_dp_link_required(target_clock, 18); 170 171 if (mode_rate > max_rate) 172 return MODE_CLOCK_HIGH; 173 174 if (mode->clock < 10000) 175 return MODE_CLOCK_LOW; 176 177 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 178 return MODE_H_ILLEGAL; 179 180 return MODE_OK; 181} 182 183static uint32_t 184pack_aux(uint8_t *src, int src_bytes) 185{ 186 int i; 187 uint32_t v = 0; 188 189 if (src_bytes > 4) 190 src_bytes = 4; 191 for (i = 0; i < src_bytes; i++) 192 v |= ((uint32_t) src[i]) << ((3-i) * 8); 193 return v; 194} 195 196static void 197unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 198{ 199 int i; 200 if (dst_bytes > 4) 201 dst_bytes = 4; 202 for (i = 0; i < dst_bytes; i++) 203 dst[i] = src >> ((3-i) * 8); 204} 205 206/* hrawclock is 1/4 the FSB frequency */ 207static int 208intel_hrawclk(struct drm_device *dev) 209{ 210 struct 
drm_i915_private *dev_priv = dev->dev_private; 211 uint32_t clkcfg; 212 213 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 214 if (IS_VALLEYVIEW(dev)) 215 return 200; 216 217 clkcfg = I915_READ(CLKCFG); 218 switch (clkcfg & CLKCFG_FSB_MASK) { 219 case CLKCFG_FSB_400: 220 return 100; 221 case CLKCFG_FSB_533: 222 return 133; 223 case CLKCFG_FSB_667: 224 return 166; 225 case CLKCFG_FSB_800: 226 return 200; 227 case CLKCFG_FSB_1067: 228 return 266; 229 case CLKCFG_FSB_1333: 230 return 333; 231 /* these two are just a guess; one of them might be right */ 232 case CLKCFG_FSB_1600: 233 case CLKCFG_FSB_1600_ALT: 234 return 400; 235 default: 236 return 133; 237 } 238} 239 240static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 241{ 242 struct drm_device *dev = intel_dp_to_dev(intel_dp); 243 struct drm_i915_private *dev_priv = dev->dev_private; 244 u32 pp_stat_reg; 245 246 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 247 return (I915_READ(pp_stat_reg) & PP_ON) != 0; 248} 249 250static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 251{ 252 struct drm_device *dev = intel_dp_to_dev(intel_dp); 253 struct drm_i915_private *dev_priv = dev->dev_private; 254 u32 pp_ctrl_reg; 255 256 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 257 return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0; 258} 259 260static void 261intel_dp_check_edp(struct intel_dp *intel_dp) 262{ 263 struct drm_device *dev = intel_dp_to_dev(intel_dp); 264 struct drm_i915_private *dev_priv = dev->dev_private; 265 u32 pp_stat_reg, pp_ctrl_reg; 266 267 if (!is_edp(intel_dp)) 268 return; 269 270 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 271 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? 
PIPEA_PP_CONTROL : PCH_PP_CONTROL; 272 273 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 274 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 275 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 276 I915_READ(pp_stat_reg), 277 I915_READ(pp_ctrl_reg)); 278 } 279} 280 281static uint32_t 282intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) 283{ 284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 285 struct drm_device *dev = intel_dig_port->base.base.dev; 286 struct drm_i915_private *dev_priv = dev->dev_private; 287 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 288 uint32_t status; 289 bool done; 290 291#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 292 if (has_aux_irq) 293 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 294 msecs_to_jiffies_timeout(10)); 295 else 296 done = wait_for_atomic(C, 10) == 0; 297 if (!done) 298 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 299 has_aux_irq); 300#undef C 301 302 return status; 303} 304 305static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, 306 int index) 307{ 308 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 309 struct drm_device *dev = intel_dig_port->base.base.dev; 310 struct drm_i915_private *dev_priv = dev->dev_private; 311 312 /* The clock divider is based off the hrawclk, 313 * and would like to run at 2MHz. So, take the 314 * hrawclk value and divide by 2 and use that 315 * 316 * Note that PCH attached eDP panels should use a 125MHz input 317 * clock divider. 318 */ 319 if (IS_VALLEYVIEW(dev)) { 320 return index ? 
0 : 100; 321 } else if (intel_dig_port->port == PORT_A) { 322 if (index) 323 return 0; 324 if (HAS_DDI(dev)) 325 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); 326 else if (IS_GEN6(dev) || IS_GEN7(dev)) 327 return 200; /* SNB & IVB eDP input clock at 400Mhz */ 328 else 329 return 225; /* eDP input clock at 450Mhz */ 330 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 331 /* Workaround for non-ULT HSW */ 332 switch (index) { 333 case 0: return 63; 334 case 1: return 72; 335 default: return 0; 336 } 337 } else if (HAS_PCH_SPLIT(dev)) { 338 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 339 } else { 340 return index ? 0 :intel_hrawclk(dev) / 2; 341 } 342} 343 344static int 345intel_dp_aux_ch(struct intel_dp *intel_dp, 346 uint8_t *send, int send_bytes, 347 uint8_t *recv, int recv_size) 348{ 349 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 350 struct drm_device *dev = intel_dig_port->base.base.dev; 351 struct drm_i915_private *dev_priv = dev->dev_private; 352 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 353 uint32_t ch_data = ch_ctl + 4; 354 uint32_t aux_clock_divider; 355 int i, ret, recv_bytes; 356 uint32_t status; 357 int try, precharge, clock = 0; 358 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); 359 360 /* dp aux is extremely sensitive to irq latency, hence request the 361 * lowest possible wakeup latency and so prevent the cpu from going into 362 * deep sleep states. 
363 */ 364 pm_qos_update_request(&dev_priv->pm_qos, 0); 365 366 intel_dp_check_edp(intel_dp); 367 368 if (IS_GEN6(dev)) 369 precharge = 3; 370 else 371 precharge = 5; 372 373 intel_aux_display_runtime_get(dev_priv); 374 375 /* Try to wait for any previous AUX channel activity */ 376 for (try = 0; try < 3; try++) { 377 status = I915_READ_NOTRACE(ch_ctl); 378 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 379 break; 380 msleep(1); 381 } 382 383 if (try == 3) { 384 WARN(1, "dp_aux_ch not started status 0x%08x\n", 385 I915_READ(ch_ctl)); 386 ret = -EBUSY; 387 goto out; 388 } 389 390 while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 391 /* Must try at least 3 times according to DP spec */ 392 for (try = 0; try < 5; try++) { 393 /* Load the send data into the aux channel data registers */ 394 for (i = 0; i < send_bytes; i += 4) 395 I915_WRITE(ch_data + i, 396 pack_aux(send + i, send_bytes - i)); 397 398 /* Send the command and wait for it to complete */ 399 I915_WRITE(ch_ctl, 400 DP_AUX_CH_CTL_SEND_BUSY | 401 (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | 402 DP_AUX_CH_CTL_TIME_OUT_400us | 403 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 404 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 405 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 406 DP_AUX_CH_CTL_DONE | 407 DP_AUX_CH_CTL_TIME_OUT_ERROR | 408 DP_AUX_CH_CTL_RECEIVE_ERROR); 409 410 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 411 412 /* Clear done status and any errors */ 413 I915_WRITE(ch_ctl, 414 status | 415 DP_AUX_CH_CTL_DONE | 416 DP_AUX_CH_CTL_TIME_OUT_ERROR | 417 DP_AUX_CH_CTL_RECEIVE_ERROR); 418 419 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 420 DP_AUX_CH_CTL_RECEIVE_ERROR)) 421 continue; 422 if (status & DP_AUX_CH_CTL_DONE) 423 break; 424 } 425 if (status & DP_AUX_CH_CTL_DONE) 426 break; 427 } 428 429 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 430 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 431 ret = -EBUSY; 432 goto out; 433 } 434 435 /* Check for timeout or receive error. 436 * Timeouts occur when the sink is not connected 437 */ 438 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 439 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 440 ret = -EIO; 441 goto out; 442 } 443 444 /* Timeouts occur when the device isn't connected, so they're 445 * "normal" -- don't fill the kernel log with these */ 446 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 447 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 448 ret = -ETIMEDOUT; 449 goto out; 450 } 451 452 /* Unload any bytes sent back from the other side */ 453 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 454 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 455 if (recv_bytes > recv_size) 456 recv_bytes = recv_size; 457 458 for (i = 0; i < recv_bytes; i += 4) 459 unpack_aux(I915_READ(ch_data + i), 460 recv + i, recv_bytes - i); 461 462 ret = recv_bytes; 463out: 464 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 465 intel_aux_display_runtime_put(dev_priv); 466 467 return ret; 468} 469 470/* Write 
data to the aux channel in native mode */ 471static int 472intel_dp_aux_native_write(struct intel_dp *intel_dp, 473 uint16_t address, uint8_t *send, int send_bytes) 474{ 475 int ret; 476 uint8_t msg[20]; 477 int msg_bytes; 478 uint8_t ack; 479 480 intel_dp_check_edp(intel_dp); 481 if (send_bytes > 16) 482 return -1; 483 msg[0] = AUX_NATIVE_WRITE << 4; 484 msg[1] = address >> 8; 485 msg[2] = address & 0xff; 486 msg[3] = send_bytes - 1; 487 memcpy(&msg[4], send, send_bytes); 488 msg_bytes = send_bytes + 4; 489 for (;;) { 490 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 491 if (ret < 0) 492 return ret; 493 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 494 break; 495 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 496 udelay(100); 497 else 498 return -EIO; 499 } 500 return send_bytes; 501} 502 503/* Write a single byte to the aux channel in native mode */ 504static int 505intel_dp_aux_native_write_1(struct intel_dp *intel_dp, 506 uint16_t address, uint8_t byte) 507{ 508 return intel_dp_aux_native_write(intel_dp, address, &byte, 1); 509} 510 511/* read bytes from a native aux channel */ 512static int 513intel_dp_aux_native_read(struct intel_dp *intel_dp, 514 uint16_t address, uint8_t *recv, int recv_bytes) 515{ 516 uint8_t msg[4]; 517 int msg_bytes; 518 uint8_t reply[20]; 519 int reply_bytes; 520 uint8_t ack; 521 int ret; 522 523 intel_dp_check_edp(intel_dp); 524 msg[0] = AUX_NATIVE_READ << 4; 525 msg[1] = address >> 8; 526 msg[2] = address & 0xff; 527 msg[3] = recv_bytes - 1; 528 529 msg_bytes = 4; 530 reply_bytes = recv_bytes + 1; 531 532 for (;;) { 533 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 534 reply, reply_bytes); 535 if (ret == 0) 536 return -EPROTO; 537 if (ret < 0) 538 return ret; 539 ack = reply[0]; 540 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { 541 memcpy(recv, reply + 1, ret - 1); 542 return ret - 1; 543 } 544 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 545 
udelay(100); 546 else 547 return -EIO; 548 } 549} 550 551static int 552intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 553 uint8_t write_byte, uint8_t *read_byte) 554{ 555 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 556 struct intel_dp *intel_dp = container_of(adapter, 557 struct intel_dp, 558 adapter); 559 uint16_t address = algo_data->address; 560 uint8_t msg[5]; 561 uint8_t reply[2]; 562 unsigned retry; 563 int msg_bytes; 564 int reply_bytes; 565 int ret; 566 567 intel_dp_check_edp(intel_dp); 568 /* Set up the command byte */ 569 if (mode & MODE_I2C_READ) 570 msg[0] = AUX_I2C_READ << 4; 571 else 572 msg[0] = AUX_I2C_WRITE << 4; 573 574 if (!(mode & MODE_I2C_STOP)) 575 msg[0] |= AUX_I2C_MOT << 4; 576 577 msg[1] = address >> 8; 578 msg[2] = address; 579 580 switch (mode) { 581 case MODE_I2C_WRITE: 582 msg[3] = 0; 583 msg[4] = write_byte; 584 msg_bytes = 5; 585 reply_bytes = 1; 586 break; 587 case MODE_I2C_READ: 588 msg[3] = 0; 589 msg_bytes = 4; 590 reply_bytes = 2; 591 break; 592 default: 593 msg_bytes = 3; 594 reply_bytes = 1; 595 break; 596 } 597 598 for (retry = 0; retry < 5; retry++) { 599 ret = intel_dp_aux_ch(intel_dp, 600 msg, msg_bytes, 601 reply, reply_bytes); 602 if (ret < 0) { 603 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 604 return ret; 605 } 606 607 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 608 case AUX_NATIVE_REPLY_ACK: 609 /* I2C-over-AUX Reply field is only valid 610 * when paired with AUX ACK. 
611 */ 612 break; 613 case AUX_NATIVE_REPLY_NACK: 614 DRM_DEBUG_KMS("aux_ch native nack\n"); 615 return -EREMOTEIO; 616 case AUX_NATIVE_REPLY_DEFER: 617 udelay(100); 618 continue; 619 default: 620 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 621 reply[0]); 622 return -EREMOTEIO; 623 } 624 625 switch (reply[0] & AUX_I2C_REPLY_MASK) { 626 case AUX_I2C_REPLY_ACK: 627 if (mode == MODE_I2C_READ) { 628 *read_byte = reply[1]; 629 } 630 return reply_bytes - 1; 631 case AUX_I2C_REPLY_NACK: 632 DRM_DEBUG_KMS("aux_i2c nack\n"); 633 return -EREMOTEIO; 634 case AUX_I2C_REPLY_DEFER: 635 DRM_DEBUG_KMS("aux_i2c defer\n"); 636 udelay(100); 637 break; 638 default: 639 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 640 return -EREMOTEIO; 641 } 642 } 643 644 DRM_ERROR("too many retries, giving up\n"); 645 return -EREMOTEIO; 646} 647 648static int 649intel_dp_i2c_init(struct intel_dp *intel_dp, 650 struct intel_connector *intel_connector, const char *name) 651{ 652 int ret; 653 654 DRM_DEBUG_KMS("i2c_init %s\n", name); 655 intel_dp->algo.running = false; 656 intel_dp->algo.address = 0; 657 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; 658 659 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); 660 intel_dp->adapter.owner = THIS_MODULE; 661 intel_dp->adapter.class = I2C_CLASS_DDC; 662 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); 663 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; 664 intel_dp->adapter.algo_data = &intel_dp->algo; 665 intel_dp->adapter.dev.parent = &intel_connector->base.kdev; 666 667 ironlake_edp_panel_vdd_on(intel_dp); 668 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 669 ironlake_edp_panel_vdd_off(intel_dp, false); 670 return ret; 671} 672 673static void 674intel_dp_set_clock(struct intel_encoder *encoder, 675 struct intel_crtc_config *pipe_config, int link_bw) 676{ 677 struct drm_device *dev = encoder->base.dev; 678 const struct dp_link_dpll *divisor = NULL; 679 int i, count = 0; 680 681 if 
(IS_G4X(dev)) { 682 divisor = gen4_dpll; 683 count = ARRAY_SIZE(gen4_dpll); 684 } else if (IS_HASWELL(dev)) { 685 /* Haswell has special-purpose DP DDI clocks. */ 686 } else if (HAS_PCH_SPLIT(dev)) { 687 divisor = pch_dpll; 688 count = ARRAY_SIZE(pch_dpll); 689 } else if (IS_VALLEYVIEW(dev)) { 690 divisor = vlv_dpll; 691 count = ARRAY_SIZE(vlv_dpll); 692 } 693 694 if (divisor && count) { 695 for (i = 0; i < count; i++) { 696 if (link_bw == divisor[i].link_bw) { 697 pipe_config->dpll = divisor[i].dpll; 698 pipe_config->clock_set = true; 699 break; 700 } 701 } 702 } 703} 704 705bool 706intel_dp_compute_config(struct intel_encoder *encoder, 707 struct intel_crtc_config *pipe_config) 708{ 709 struct drm_device *dev = encoder->base.dev; 710 struct drm_i915_private *dev_priv = dev->dev_private; 711 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 712 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 713 enum port port = dp_to_dig_port(intel_dp)->port; 714 struct intel_crtc *intel_crtc = encoder->new_crtc; 715 struct intel_connector *intel_connector = intel_dp->attached_connector; 716 int lane_count, clock; 717 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 718 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 
1 : 0; 719 int bpp, mode_rate; 720 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 721 int link_avail, link_clock; 722 723 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) 724 pipe_config->has_pch_encoder = true; 725 726 pipe_config->has_dp_encoder = true; 727 728 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 729 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 730 adjusted_mode); 731 if (!HAS_PCH_SPLIT(dev)) 732 intel_gmch_panel_fitting(intel_crtc, pipe_config, 733 intel_connector->panel.fitting_mode); 734 else 735 intel_pch_panel_fitting(intel_crtc, pipe_config, 736 intel_connector->panel.fitting_mode); 737 } 738 739 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 740 return false; 741 742 DRM_DEBUG_KMS("DP link computation with max lane count %i " 743 "max bw %02x pixel clock %iKHz\n", 744 max_lane_count, bws[max_clock], adjusted_mode->clock); 745 746 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 747 * bpc in between. 
*/ 748 bpp = pipe_config->pipe_bpp; 749 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { 750 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 751 dev_priv->vbt.edp_bpp); 752 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); 753 } 754 755 for (; bpp >= 6*3; bpp -= 2*3) { 756 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 757 758 for (clock = 0; clock <= max_clock; clock++) { 759 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 760 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 761 link_avail = intel_dp_max_data_rate(link_clock, 762 lane_count); 763 764 if (mode_rate <= link_avail) { 765 goto found; 766 } 767 } 768 } 769 } 770 771 return false; 772 773found: 774 if (intel_dp->color_range_auto) { 775 /* 776 * See: 777 * CEA-861-E - 5.1 Default Encoding Parameters 778 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 779 */ 780 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) 781 intel_dp->color_range = DP_COLOR_RANGE_16_235; 782 else 783 intel_dp->color_range = 0; 784 } 785 786 if (intel_dp->color_range) 787 pipe_config->limited_color_range = true; 788 789 intel_dp->link_bw = bws[clock]; 790 intel_dp->lane_count = lane_count; 791 pipe_config->pipe_bpp = bpp; 792 pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 793 794 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 795 intel_dp->link_bw, intel_dp->lane_count, 796 pipe_config->port_clock, bpp); 797 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 798 mode_rate, link_avail); 799 800 intel_link_compute_m_n(bpp, lane_count, 801 adjusted_mode->clock, pipe_config->port_clock, 802 &pipe_config->dp_m_n); 803 804 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 805 806 return true; 807} 808 809void intel_dp_init_link_config(struct intel_dp *intel_dp) 810{ 811 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 812 intel_dp->link_configuration[0] = intel_dp->link_bw; 813 
intel_dp->link_configuration[1] = intel_dp->lane_count; 814 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 815 /* 816 * Check for DPCD version > 1.1 and enhanced framing support 817 */ 818 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 819 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 820 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 821 } 822} 823 824static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) 825{ 826 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 827 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 828 struct drm_device *dev = crtc->base.dev; 829 struct drm_i915_private *dev_priv = dev->dev_private; 830 u32 dpa_ctl; 831 832 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock); 833 dpa_ctl = I915_READ(DP_A); 834 dpa_ctl &= ~DP_PLL_FREQ_MASK; 835 836 if (crtc->config.port_clock == 162000) { 837 /* For a long time we've carried around a ILK-DevA w/a for the 838 * 160MHz clock. If we're really unlucky, it's still required. 
839 */ 840 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); 841 dpa_ctl |= DP_PLL_FREQ_160MHZ; 842 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 843 } else { 844 dpa_ctl |= DP_PLL_FREQ_270MHZ; 845 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 846 } 847 848 I915_WRITE(DP_A, dpa_ctl); 849 850 POSTING_READ(DP_A); 851 udelay(500); 852} 853 854static void intel_dp_mode_set(struct intel_encoder *encoder) 855{ 856 struct drm_device *dev = encoder->base.dev; 857 struct drm_i915_private *dev_priv = dev->dev_private; 858 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 859 enum port port = dp_to_dig_port(intel_dp)->port; 860 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 861 struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; 862 863 /* 864 * There are four kinds of DP registers: 865 * 866 * IBX PCH 867 * SNB CPU 868 * IVB CPU 869 * CPT PCH 870 * 871 * IBX PCH and CPU are the same for almost everything, 872 * except that the CPU DP PLL is configured in this 873 * register 874 * 875 * CPT PCH is quite different, having many bits moved 876 * to the TRANS_DP_CTL register instead. That 877 * configuration happens (oddly) in ironlake_pch_enable 878 */ 879 880 /* Preserve the BIOS-computed detected bit. This is 881 * supposed to be read-only. 
882 */ 883 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 884 885 /* Handle DP bits in common between all three register formats */ 886 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 887 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count); 888 889 if (intel_dp->has_audio) { 890 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 891 pipe_name(crtc->pipe)); 892 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 893 intel_write_eld(&encoder->base, adjusted_mode); 894 } 895 896 intel_dp_init_link_config(intel_dp); 897 898 /* Split out the IBX/CPU vs CPT settings */ 899 900 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 901 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 902 intel_dp->DP |= DP_SYNC_HS_HIGH; 903 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 904 intel_dp->DP |= DP_SYNC_VS_HIGH; 905 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 906 907 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 908 intel_dp->DP |= DP_ENHANCED_FRAMING; 909 910 intel_dp->DP |= crtc->pipe << 29; 911 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) { 912 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 913 intel_dp->DP |= intel_dp->color_range; 914 915 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 916 intel_dp->DP |= DP_SYNC_HS_HIGH; 917 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 918 intel_dp->DP |= DP_SYNC_VS_HIGH; 919 intel_dp->DP |= DP_LINK_TRAIN_OFF; 920 921 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 922 intel_dp->DP |= DP_ENHANCED_FRAMING; 923 924 if (crtc->pipe == 1) 925 intel_dp->DP |= DP_PIPEB_SELECT; 926 } else { 927 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 928 } 929 930 if (port == PORT_A && !IS_VALLEYVIEW(dev)) 931 ironlake_set_pll_cpu_edp(intel_dp); 932} 933 934#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 935#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 936 937#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | 
PP_SEQUENCE_STATE_MASK) 938#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 939 940#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 941#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 942 943static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 944 u32 mask, 945 u32 value) 946{ 947 struct drm_device *dev = intel_dp_to_dev(intel_dp); 948 struct drm_i915_private *dev_priv = dev->dev_private; 949 u32 pp_stat_reg, pp_ctrl_reg; 950 951 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; 952 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; 953 954 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 955 mask, value, 956 I915_READ(pp_stat_reg), 957 I915_READ(pp_ctrl_reg)); 958 959 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) { 960 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 961 I915_READ(pp_stat_reg), 962 I915_READ(pp_ctrl_reg)); 963 } 964} 965 966static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 967{ 968 DRM_DEBUG_KMS("Wait for panel power on\n"); 969 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 970} 971 972static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 973{ 974 DRM_DEBUG_KMS("Wait for panel power off time\n"); 975 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 976} 977 978static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 979{ 980 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 981 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 982} 983 984 985/* Read the current pp_control value, unlocking the register if it 986 * is locked 987 */ 988 989static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) 990{ 991 struct drm_device *dev = intel_dp_to_dev(intel_dp); 992 struct drm_i915_private *dev_priv = dev->dev_private; 993 u32 control; 
	u32 pp_ctrl_reg;

	/* VLV keeps the panel power sequencer in the display block (pipe A
	 * instance); everything else uses the PCH sequencer. */
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
	control = I915_READ(pp_ctrl_reg);

	/* Replace the write-protect key field so subsequent writes stick. */
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Force panel VDD on so the AUX channel can be used before the panel is
 * fully powered up.  Sets want_panel_vdd; must be balanced by a later
 * ironlake_edp_panel_vdd_off().  No-op for non-eDP ports.
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Honour the panel's mandated off->on cycle time before re-powering. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/*
 * Actually drop VDD, but only if no-one has re-requested it in the meantime
 * (want_panel_vdd cleared) and the hardware still has it forced on.
 * Caller must hold mode_config.mutex.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
				I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Deferred-work path for dropping VDD; takes the lock vdd_off_sync expects. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Release a VDD reference taken by ironlake_edp_panel_vdd_on().  With
 * sync=true VDD is dropped immediately; otherwise the drop is deferred so a
 * quick sequence of AUX accesses doesn't keep cycling panel power.
 */
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/* Turn full panel power on and wait for the sequencer to finish. */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

/* Turn panel power off; requires VDD to still be requested (see WARN). */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* VDD was just dropped together with panel power above. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

/* Enable the eDP backlight (both the PP sequencer bit and the PWM). */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(dev, pipe);
}

/* Disable the eDP backlight, then honour the panel's backlight-off delay. */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	msleep(intel_dp->backlight_off_delay);
}

/* Enable the CPU eDP PLL (DP_A).  The pipe must still be disabled. */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

/* Disable the CPU eDP PLL (DP_A).  The pipe must already be disabled. */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

/*
 * Read back which pipe (if any) this DP port is currently driving.
 * Returns false only when the port is disabled; on CPT the pipe has to be
 * found by scanning the transcoder DP control registers.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB port A uses the CPT-style pipe-select encoding. */
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: the routing lives in the per-transcoder DP control
		 * registers, so scan them for our port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

/*
 * Fill pipe_config with state read back from the hardware: sync polarity
 * flags and, for port A, the port clock derived from the eDP PLL frequency.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		/* Polarity is encoded in the port register itself. */
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		/* On CPT the polarity lives in the transcoder DP control. */
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}
}

/* True when this is an eDP panel whose DPCD advertises PSR support. */
static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) &&
		intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

/* True when the source-side PSR block is currently enabled (HSW only). */
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev))
		return false;

	return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
}

1426static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, 1427 struct edp_vsc_psr *vsc_psr) 1428{ 1429 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1430 struct drm_device *dev = dig_port->base.base.dev; 1431 struct drm_i915_private *dev_priv = dev->dev_private; 1432 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 1433 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); 1434 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); 1435 uint32_t *data = (uint32_t *) vsc_psr; 1436 unsigned int i; 1437 1438 /* As per BSPec (Pipe Video Data Island Packet), we need to disable 1439 the video DIP being updated before program video DIP data buffer 1440 registers for DIP being updated. */ 1441 I915_WRITE(ctl_reg, 0); 1442 POSTING_READ(ctl_reg); 1443 1444 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { 1445 if (i < sizeof(struct edp_vsc_psr)) 1446 I915_WRITE(data_reg + i, *data++); 1447 else 1448 I915_WRITE(data_reg + i, 0); 1449 } 1450 1451 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); 1452 POSTING_READ(ctl_reg); 1453} 1454 1455static void intel_edp_psr_setup(struct intel_dp *intel_dp) 1456{ 1457 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1458 struct drm_i915_private *dev_priv = dev->dev_private; 1459 struct edp_vsc_psr psr_vsc; 1460 1461 if (intel_dp->psr_setup_done) 1462 return; 1463 1464 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ 1465 memset(&psr_vsc, 0, sizeof(psr_vsc)); 1466 psr_vsc.sdp_header.HB0 = 0; 1467 psr_vsc.sdp_header.HB1 = 0x7; 1468 psr_vsc.sdp_header.HB2 = 0x2; 1469 psr_vsc.sdp_header.HB3 = 0x8; 1470 intel_edp_psr_write_vsc(intel_dp, &psr_vsc); 1471 1472 /* Avoid continuous PSR exit by masking memup and hpd */ 1473 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1474 EDP_PSR_DEBUG_MASK_HPD); 1475 1476 intel_dp->psr_setup_done = true; 1477} 1478 1479static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) 1480{ 1481 struct drm_device *dev = 
intel_dp_to_dev(intel_dp); 1482 struct drm_i915_private *dev_priv = dev->dev_private; 1483 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); 1484 int precharge = 0x3; 1485 int msg_size = 5; /* Header(4) + Message(1) */ 1486 1487 /* Enable PSR in sink */ 1488 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) 1489 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, 1490 DP_PSR_ENABLE & 1491 ~DP_PSR_MAIN_LINK_ACTIVE); 1492 else 1493 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, 1494 DP_PSR_ENABLE | 1495 DP_PSR_MAIN_LINK_ACTIVE); 1496 1497 /* Setup AUX registers */ 1498 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); 1499 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); 1500 I915_WRITE(EDP_PSR_AUX_CTL, 1501 DP_AUX_CH_CTL_TIME_OUT_400us | 1502 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1503 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1504 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); 1505} 1506 1507static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) 1508{ 1509 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1510 struct drm_i915_private *dev_priv = dev->dev_private; 1511 uint32_t max_sleep_time = 0x1f; 1512 uint32_t idle_frames = 1; 1513 uint32_t val = 0x0; 1514 1515 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { 1516 val |= EDP_PSR_LINK_STANDBY; 1517 val |= EDP_PSR_TP2_TP3_TIME_0us; 1518 val |= EDP_PSR_TP1_TIME_0us; 1519 val |= EDP_PSR_SKIP_AUX_EXIT; 1520 } else 1521 val |= EDP_PSR_LINK_DISABLE; 1522 1523 I915_WRITE(EDP_PSR_CTL, val | 1524 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | 1525 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1526 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1527 EDP_PSR_ENABLE); 1528} 1529 1530static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) 1531{ 1532 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1533 struct drm_device *dev = dig_port->base.base.dev; 1534 struct drm_i915_private *dev_priv = dev->dev_private; 1535 
struct drm_crtc *crtc = dig_port->base.base.crtc; 1536 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1537 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; 1538 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1539 1540 if (!IS_HASWELL(dev)) { 1541 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1542 dev_priv->no_psr_reason = PSR_NO_SOURCE; 1543 return false; 1544 } 1545 1546 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1547 (dig_port->port != PORT_A)) { 1548 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1549 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA; 1550 return false; 1551 } 1552 1553 if (!is_edp_psr(intel_dp)) { 1554 DRM_DEBUG_KMS("PSR not supported by this panel\n"); 1555 dev_priv->no_psr_reason = PSR_NO_SINK; 1556 return false; 1557 } 1558 1559 if (!i915_enable_psr) { 1560 DRM_DEBUG_KMS("PSR disable by flag\n"); 1561 dev_priv->no_psr_reason = PSR_MODULE_PARAM; 1562 return false; 1563 } 1564 1565 crtc = dig_port->base.base.crtc; 1566 if (crtc == NULL) { 1567 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1568 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; 1569 return false; 1570 } 1571 1572 intel_crtc = to_intel_crtc(crtc); 1573 if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { 1574 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1575 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; 1576 return false; 1577 } 1578 1579 obj = to_intel_framebuffer(crtc->fb)->obj; 1580 if (obj->tiling_mode != I915_TILING_X || 1581 obj->fence_reg == I915_FENCE_REG_NONE) { 1582 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1583 dev_priv->no_psr_reason = PSR_NOT_TILED; 1584 return false; 1585 } 1586 1587 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { 1588 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); 1589 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED; 1590 return false; 1591 } 1592 1593 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1594 S3D_ENABLE) { 
1595 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1596 dev_priv->no_psr_reason = PSR_S3D_ENABLED; 1597 return false; 1598 } 1599 1600 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { 1601 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); 1602 dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED; 1603 return false; 1604 } 1605 1606 return true; 1607} 1608 1609static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) 1610{ 1611 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1612 1613 if (!intel_edp_psr_match_conditions(intel_dp) || 1614 intel_edp_is_psr_enabled(dev)) 1615 return; 1616 1617 /* Setup PSR once */ 1618 intel_edp_psr_setup(intel_dp); 1619 1620 /* Enable PSR on the panel */ 1621 intel_edp_psr_enable_sink(intel_dp); 1622 1623 /* Enable PSR on the host */ 1624 intel_edp_psr_enable_source(intel_dp); 1625} 1626 1627void intel_edp_psr_enable(struct intel_dp *intel_dp) 1628{ 1629 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1630 1631 if (intel_edp_psr_match_conditions(intel_dp) && 1632 !intel_edp_is_psr_enabled(dev)) 1633 intel_edp_psr_do_enable(intel_dp); 1634} 1635 1636void intel_edp_psr_disable(struct intel_dp *intel_dp) 1637{ 1638 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1639 struct drm_i915_private *dev_priv = dev->dev_private; 1640 1641 if (!intel_edp_is_psr_enabled(dev)) 1642 return; 1643 1644 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); 1645 1646 /* Wait till PSR is idle */ 1647 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 1648 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1649 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 1650} 1651 1652void intel_edp_psr_update(struct drm_device *dev) 1653{ 1654 struct intel_encoder *encoder; 1655 struct intel_dp *intel_dp = NULL; 1656 1657 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) 1658 if (encoder->type == INTEL_OUTPUT_EDP) { 1659 intel_dp = enc_to_intel_dp(&encoder->base); 1660 1661 if 
(!is_edp_psr(intel_dp)) 1662 return; 1663 1664 if (!intel_edp_psr_match_conditions(intel_dp)) 1665 intel_edp_psr_disable(intel_dp); 1666 else 1667 if (!intel_edp_is_psr_enabled(dev)) 1668 intel_edp_psr_do_enable(intel_dp); 1669 } 1670} 1671 1672static void intel_disable_dp(struct intel_encoder *encoder) 1673{ 1674 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1675 enum port port = dp_to_dig_port(intel_dp)->port; 1676 struct drm_device *dev = encoder->base.dev; 1677 1678 /* Make sure the panel is off before trying to change the mode. But also 1679 * ensure that we have vdd while we switch off the panel. */ 1680 ironlake_edp_panel_vdd_on(intel_dp); 1681 ironlake_edp_backlight_off(intel_dp); 1682 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1683 ironlake_edp_panel_off(intel_dp); 1684 1685 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 1686 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) 1687 intel_dp_link_down(intel_dp); 1688} 1689 1690static void intel_post_disable_dp(struct intel_encoder *encoder) 1691{ 1692 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1693 enum port port = dp_to_dig_port(intel_dp)->port; 1694 struct drm_device *dev = encoder->base.dev; 1695 1696 if (port == PORT_A || IS_VALLEYVIEW(dev)) { 1697 intel_dp_link_down(intel_dp); 1698 if (!IS_VALLEYVIEW(dev)) 1699 ironlake_edp_pll_off(intel_dp); 1700 } 1701} 1702 1703static void intel_enable_dp(struct intel_encoder *encoder) 1704{ 1705 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1706 struct drm_device *dev = encoder->base.dev; 1707 struct drm_i915_private *dev_priv = dev->dev_private; 1708 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1709 1710 if (WARN_ON(dp_reg & DP_PORT_EN)) 1711 return; 1712 1713 ironlake_edp_panel_vdd_on(intel_dp); 1714 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1715 intel_dp_start_link_train(intel_dp); 1716 ironlake_edp_panel_on(intel_dp); 1717 ironlake_edp_panel_vdd_off(intel_dp, true); 1718 
intel_dp_complete_link_train(intel_dp); 1719 intel_dp_stop_link_train(intel_dp); 1720 ironlake_edp_backlight_on(intel_dp); 1721} 1722 1723static void vlv_enable_dp(struct intel_encoder *encoder) 1724{ 1725} 1726 1727static void intel_pre_enable_dp(struct intel_encoder *encoder) 1728{ 1729 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1730 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1731 1732 if (dport->port == PORT_A) 1733 ironlake_edp_pll_on(intel_dp); 1734} 1735 1736static void vlv_pre_enable_dp(struct intel_encoder *encoder) 1737{ 1738 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1739 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1740 struct drm_device *dev = encoder->base.dev; 1741 struct drm_i915_private *dev_priv = dev->dev_private; 1742 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1743 int port = vlv_dport_to_channel(dport); 1744 int pipe = intel_crtc->pipe; 1745 u32 val; 1746 1747 mutex_lock(&dev_priv->dpio_lock); 1748 1749 val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port)); 1750 val = 0; 1751 if (pipe) 1752 val |= (1<<21); 1753 else 1754 val &= ~(1<<21); 1755 val |= 0x001000c4; 1756 vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val); 1757 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018); 1758 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888); 1759 1760 mutex_unlock(&dev_priv->dpio_lock); 1761 1762 intel_enable_dp(encoder); 1763 1764 vlv_wait_port_ready(dev_priv, port); 1765} 1766 1767static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) 1768{ 1769 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1770 struct drm_device *dev = encoder->base.dev; 1771 struct drm_i915_private *dev_priv = dev->dev_private; 1772 struct intel_crtc *intel_crtc = 1773 to_intel_crtc(encoder->base.crtc); 1774 int port = vlv_dport_to_channel(dport); 1775 int pipe = intel_crtc->pipe; 1776 1777 if 
(!IS_VALLEYVIEW(dev)) 1778 return; 1779 1780 /* Program Tx lane resets to default */ 1781 mutex_lock(&dev_priv->dpio_lock); 1782 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 1783 DPIO_PCS_TX_LANE2_RESET | 1784 DPIO_PCS_TX_LANE1_RESET); 1785 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 1786 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1787 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1788 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1789 DPIO_PCS_CLK_SOFT_RESET); 1790 1791 /* Fix up inter-pair skew failure */ 1792 vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00); 1793 vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500); 1794 vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000); 1795 mutex_unlock(&dev_priv->dpio_lock); 1796} 1797 1798/* 1799 * Native read with retry for link status and receiver capability reads for 1800 * cases where the sink may still be asleep. 1801 */ 1802static bool 1803intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1804 uint8_t *recv, int recv_bytes) 1805{ 1806 int ret, i; 1807 1808 /* 1809 * Sinks are *supposed* to come up within 1ms from an off state, 1810 * but we're also supposed to retry 3 times per the spec. 
1811 */ 1812 for (i = 0; i < 3; i++) { 1813 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1814 recv_bytes); 1815 if (ret == recv_bytes) 1816 return true; 1817 msleep(1); 1818 } 1819 1820 return false; 1821} 1822 1823/* 1824 * Fetch AUX CH registers 0x202 - 0x207 which contain 1825 * link status information 1826 */ 1827static bool 1828intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1829{ 1830 return intel_dp_aux_native_read_retry(intel_dp, 1831 DP_LANE0_1_STATUS, 1832 link_status, 1833 DP_LINK_STATUS_SIZE); 1834} 1835 1836#if 0 1837static char *voltage_names[] = { 1838 "0.4V", "0.6V", "0.8V", "1.2V" 1839}; 1840static char *pre_emph_names[] = { 1841 "0dB", "3.5dB", "6dB", "9.5dB" 1842}; 1843static char *link_train_names[] = { 1844 "pattern 1", "pattern 2", "idle", "off" 1845}; 1846#endif 1847 1848/* 1849 * These are source-specific values; current Intel hardware supports 1850 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1851 */ 1852 1853static uint8_t 1854intel_dp_voltage_max(struct intel_dp *intel_dp) 1855{ 1856 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1857 enum port port = dp_to_dig_port(intel_dp)->port; 1858 1859 if (IS_VALLEYVIEW(dev)) 1860 return DP_TRAIN_VOLTAGE_SWING_1200; 1861 else if (IS_GEN7(dev) && port == PORT_A) 1862 return DP_TRAIN_VOLTAGE_SWING_800; 1863 else if (HAS_PCH_CPT(dev) && port != PORT_A) 1864 return DP_TRAIN_VOLTAGE_SWING_1200; 1865 else 1866 return DP_TRAIN_VOLTAGE_SWING_800; 1867} 1868 1869static uint8_t 1870intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1871{ 1872 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1873 enum port port = dp_to_dig_port(intel_dp)->port; 1874 1875 if (HAS_DDI(dev)) { 1876 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1877 case DP_TRAIN_VOLTAGE_SWING_400: 1878 return DP_TRAIN_PRE_EMPHASIS_9_5; 1879 case DP_TRAIN_VOLTAGE_SWING_600: 1880 return DP_TRAIN_PRE_EMPHASIS_6; 1881 case 
DP_TRAIN_VOLTAGE_SWING_800: 1882 return DP_TRAIN_PRE_EMPHASIS_3_5; 1883 case DP_TRAIN_VOLTAGE_SWING_1200: 1884 default: 1885 return DP_TRAIN_PRE_EMPHASIS_0; 1886 } 1887 } else if (IS_VALLEYVIEW(dev)) { 1888 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1889 case DP_TRAIN_VOLTAGE_SWING_400: 1890 return DP_TRAIN_PRE_EMPHASIS_9_5; 1891 case DP_TRAIN_VOLTAGE_SWING_600: 1892 return DP_TRAIN_PRE_EMPHASIS_6; 1893 case DP_TRAIN_VOLTAGE_SWING_800: 1894 return DP_TRAIN_PRE_EMPHASIS_3_5; 1895 case DP_TRAIN_VOLTAGE_SWING_1200: 1896 default: 1897 return DP_TRAIN_PRE_EMPHASIS_0; 1898 } 1899 } else if (IS_GEN7(dev) && port == PORT_A) { 1900 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1901 case DP_TRAIN_VOLTAGE_SWING_400: 1902 return DP_TRAIN_PRE_EMPHASIS_6; 1903 case DP_TRAIN_VOLTAGE_SWING_600: 1904 case DP_TRAIN_VOLTAGE_SWING_800: 1905 return DP_TRAIN_PRE_EMPHASIS_3_5; 1906 default: 1907 return DP_TRAIN_PRE_EMPHASIS_0; 1908 } 1909 } else { 1910 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1911 case DP_TRAIN_VOLTAGE_SWING_400: 1912 return DP_TRAIN_PRE_EMPHASIS_6; 1913 case DP_TRAIN_VOLTAGE_SWING_600: 1914 return DP_TRAIN_PRE_EMPHASIS_6; 1915 case DP_TRAIN_VOLTAGE_SWING_800: 1916 return DP_TRAIN_PRE_EMPHASIS_3_5; 1917 case DP_TRAIN_VOLTAGE_SWING_1200: 1918 default: 1919 return DP_TRAIN_PRE_EMPHASIS_0; 1920 } 1921 } 1922} 1923 1924static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) 1925{ 1926 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1927 struct drm_i915_private *dev_priv = dev->dev_private; 1928 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 1929 struct intel_crtc *intel_crtc = 1930 to_intel_crtc(dport->base.base.crtc); 1931 unsigned long demph_reg_value, preemph_reg_value, 1932 uniqtranscale_reg_value; 1933 uint8_t train_set = intel_dp->train_set[0]; 1934 int port = vlv_dport_to_channel(dport); 1935 int pipe = intel_crtc->pipe; 1936 1937 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1938 case 
DP_TRAIN_PRE_EMPHASIS_0: 1939 preemph_reg_value = 0x0004000; 1940 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1941 case DP_TRAIN_VOLTAGE_SWING_400: 1942 demph_reg_value = 0x2B405555; 1943 uniqtranscale_reg_value = 0x552AB83A; 1944 break; 1945 case DP_TRAIN_VOLTAGE_SWING_600: 1946 demph_reg_value = 0x2B404040; 1947 uniqtranscale_reg_value = 0x5548B83A; 1948 break; 1949 case DP_TRAIN_VOLTAGE_SWING_800: 1950 demph_reg_value = 0x2B245555; 1951 uniqtranscale_reg_value = 0x5560B83A; 1952 break; 1953 case DP_TRAIN_VOLTAGE_SWING_1200: 1954 demph_reg_value = 0x2B405555; 1955 uniqtranscale_reg_value = 0x5598DA3A; 1956 break; 1957 default: 1958 return 0; 1959 } 1960 break; 1961 case DP_TRAIN_PRE_EMPHASIS_3_5: 1962 preemph_reg_value = 0x0002000; 1963 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1964 case DP_TRAIN_VOLTAGE_SWING_400: 1965 demph_reg_value = 0x2B404040; 1966 uniqtranscale_reg_value = 0x5552B83A; 1967 break; 1968 case DP_TRAIN_VOLTAGE_SWING_600: 1969 demph_reg_value = 0x2B404848; 1970 uniqtranscale_reg_value = 0x5580B83A; 1971 break; 1972 case DP_TRAIN_VOLTAGE_SWING_800: 1973 demph_reg_value = 0x2B404040; 1974 uniqtranscale_reg_value = 0x55ADDA3A; 1975 break; 1976 default: 1977 return 0; 1978 } 1979 break; 1980 case DP_TRAIN_PRE_EMPHASIS_6: 1981 preemph_reg_value = 0x0000000; 1982 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1983 case DP_TRAIN_VOLTAGE_SWING_400: 1984 demph_reg_value = 0x2B305555; 1985 uniqtranscale_reg_value = 0x5570B83A; 1986 break; 1987 case DP_TRAIN_VOLTAGE_SWING_600: 1988 demph_reg_value = 0x2B2B4040; 1989 uniqtranscale_reg_value = 0x55ADDA3A; 1990 break; 1991 default: 1992 return 0; 1993 } 1994 break; 1995 case DP_TRAIN_PRE_EMPHASIS_9_5: 1996 preemph_reg_value = 0x0006000; 1997 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1998 case DP_TRAIN_VOLTAGE_SWING_400: 1999 demph_reg_value = 0x1B405555; 2000 uniqtranscale_reg_value = 0x55ADDA3A; 2001 break; 2002 default: 2003 return 0; 2004 } 2005 break; 2006 default: 2007 
		return 0;
	}

	/* Program the computed swing/de-emphasis values into the VLV PHY
	 * (DPIO) lanes.  OCALINIT is toggled off before and back on after
	 * updating the swing control registers. */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

/*
 * Compute the next voltage-swing/pre-emphasis request from the sink's
 * per-lane ADJUST_REQUEST fields in @link_status: take the maximum value
 * requested over all active lanes, clamp to what the source hardware
 * supports (setting the MAX_*_REACHED flags when clamped), and store the
 * result into all four intel_dp->train_set entries.
 */
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	/* Highest request across active lanes wins. */
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* All 4 lane slots are written even if fewer lanes are in use. */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/*
 * Gen4 DP port register encoding: voltage swing and pre-emphasis are
 * independent bitfields, so the two switches simply OR them together.
 * Unknown values fall back to the lowest setting.
 */
static uint32_t
intel_gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control.  Only a subset of
 * swing/emphasis combinations have dedicated hardware encodings; the
 * rest map onto the nearest supported one (or the lowest, with a debug
 * message, for combinations the hardware doesn't support at all). */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}

/* Properly updates "DP" with the correct signal levels.
 * Picks the platform-specific encoding of train_set[0] (all lanes carry
 * the same value, see intel_get_adjust_train()) and merges it into *DP
 * under the platform's swing/emphasis mask.  On VLV the levels live in
 * the PHY, not the port register, hence mask = 0. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}

/*
 * Program the requested training pattern on the source side (DP_TP_CTL
 * for DDI platforms, otherwise the port register itself) and mirror it
 * to the sink via the DPCD TRAINING_PATTERN_SET register.  While a
 * training pattern is active the per-lane swing/emphasis values from
 * intel_dp->train_set are also written to the sink.
 *
 * Returns false if the per-lane DPCD write did not transfer lane_count
 * bytes, true otherwise.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No hw support; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No hw support; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Tell the sink which pattern (and scrambling mode) we are sending. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

/* Switch the DDI transport to idle-pattern transmission after training
 * (no-op on non-DDI platforms). */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}

/* Enable corresponding port and start training pattern 1.
 * Clock-recovery loop: keep sending pattern 1 with the sink-requested
 * swing/emphasis until the sink reports CR lock, giving up after 5
 * retries at max swing (loop_tries) or 5 retries at the same voltage
 * (voltage_tries, checked further down). */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			/* Restart from the lowest levels after a full sweep. */
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}

/*
 * Second phase of link training: channel equalization with pattern 2.
 * If clock recovery is lost the whole training restarts (bounded by
 * cr_tries); pattern-2-only retries are bounded by tries.  On success
 * the port is switched to idle transmission.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}

/* End link training: disable the training pattern on source and sink. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}

static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/*
 * Read the sink's DPCD receiver-capability block (and PSR capability),
 * plus per-port downstream info when a branch device is present.
 * Returns false if the AUX transfer fails, the DPCD revision is zero,
 * or the downstream port status cannot be fetched.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
				       intel_dp->psr_dpcd,
				       sizeof(intel_dp->psr_dpcd));
	if (is_edp_psr(intel_dp))
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/* Debug-log the sink and branch OUIs if the sink advertises OUI support.
 * Requires VDD since this talks over AUX on a possibly-off eDP panel. */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/* Fetch the sink's DEVICE_SERVICE_IRQ_VECTOR; returns false on AUX failure. */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

/* Automated-test requests are not implemented; always respond with NAK. */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain the full link if channel equalization was lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* Connector probe for PCH-split platforms: lid state for eDP, digital
 * port live-status plus DPCD probing otherwise. */
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/* Connector probe for gen4-style platforms using the PORT_HOTPLUG_STAT
 * live-status bits. */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	switch (intel_dig_port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/* Return the connector's EDID: a copy of the cached one when present
 * (NULL if the cache marks it invalid), otherwise read it over DDC.
 * Caller frees the returned EDID. */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		struct edid *edid;
		int size;

		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
		if (!edid)
			return NULL;

		return edid;
	}

	return drm_get_edid(connector, adapter);
}

/* Add the EDID-derived modes to the connector, preferring the cached
 * EDID; returns the number of modes added. */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return 0;

		return intel_connector_update_modes(connector,
						    intel_connector->edid);
	}

	return intel_ddc_get_modes(connector, adapter);
}

static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp
			*intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector));

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Audio: honour the force_audio override, else consult the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	return connector_status_connected;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret)
		return ret;

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

/* Re-read the EDID and report whether the monitor declares audio support. */
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

/* Connector property setter: handles force_audio, broadcast RGB range and
 * (eDP only) the panel scaling mode; triggers a modeset via 'done' when
 * the new value requires one.  Returns 0 on success, -EINVAL for an
 * unknown property or value. */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}

static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already.
	 */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

/* Encoder teardown: remove the AUX i2c adapter and, for eDP, make sure
 * any pending delayed VDD-off work has run before freeing. */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

/* Hotplug callback: just revalidate the link. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control.
 * Returns the DP output register of the first DP/eDP encoder on @crtc,
 * or -1 if none is found. */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	return -1;
}

/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->dvo_port == PORT_IDPD &&
		    p_child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}

/* Attach the standard DP connector properties (audio, broadcast RGB and,
 * for eDP, the scaling mode, defaulting to aspect-preserving). */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_control_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	/* ... non-PCH platforms use the pipe A PP register block instead. */
	} else {
		pp_control_reg = PIPEA_PP_CONTROL;
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_control_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers (all in 100us units,
	 * except t11_t12 which is converted below) */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* register value is in 100ms units; scale to the common 100us unit */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits.
	 */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* convert the resolved 100us-unit values to ms, rounding up */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/* hand the resolved (still 100us-unit) sequence back to the caller */
	if (out)
		*out = final;
}

/*
 * Program the resolved power sequence 'seq' into the panel power sequencer
 * registers, including the pp clock divisor and the port selection bits.
 * NOTE(review): function continues past this chunk boundary.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	/* And finally store the new values in the power sequencer.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		/* preserve whatever the hw already has in the top bits */
		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_POWER_PORT_DP_A;
		else
			port_sel = PANEL_POWER_PORT_DP_D;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}

/*
 * Probe an eDP panel: resolve the power sequence, cache the DPCD and EDID,
 * program the power sequencer registers and register the fixed panel mode
 * (EDID-preferred, VBT fallback).  Returns false if the supposed panel is
 * a "ghost" (no DPCD readable); returns true otherwise, including for
 * non-eDP ports, where this is a no-op.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	if (!is_edp(intel_dp))
		return true;

	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	/* Cache DPCD and EDID for edp.
	 */
	ironlake_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	/* keep VDD on for the whole EDID read */
	ironlake_edp_panel_vdd_on(intel_dp);
	edid = drm_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but unparseable: remember the failure */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}

	ironlake_edp_panel_vdd_off(intel_dp, false);

	intel_panel_init(&intel_connector->panel, fixed_mode);
	intel_panel_setup_backlight(connector);

	return true;
}

/*
 * Set up the DP connector for 'intel_dig_port': pick the connector type
 * (DP vs eDP based on port and platform), register the connector, the DDC
 * bus and the hpd pin, and probe any eDP panel.  Returns false (with the
 * connector torn down again) if eDP panel probing fails.
 * NOTE(review): function continues past this chunk boundary.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct
	intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type, error;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	type = DRM_MODE_CONNECTOR_DisplayPort;
	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	switch (port) {
	case PORT_A:
		/* port A is always eDP */
		type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_C:
		if (IS_VALLEYVIEW(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_D:
		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	default:	/* silence GCC warning */
		break;
	}

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* default: AUX channel control register sits right after output_reg */
	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		/* DDI platforms have fixed per-port AUX registers instead */
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus.
	 */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
	     error, port_name(port));

	intel_dp->psr_setup_done = false;

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* eDP panel probing failed ("ghost" panel): undo everything
		 * set up above, mirroring the encoder/connector destroy
		 * paths. */
		i2c_del_adapter(&intel_dp->adapter);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			mutex_lock(&dev->mode_config.mutex);
			ironlake_panel_vdd_off_sync(intel_dp);
			mutex_unlock(&dev->mode_config.mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}

/*
 * Allocate and register a digital port driving a DP (or eDP) output on
 * 'output_reg'/'port': set up the encoder, wire the platform-specific
 * enable/disable hooks, and initialize the connector.  All allocations are
 * released again on failure.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->mode_set = intel_dp_mode_set;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->post_disable = intel_post_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	if (IS_VALLEYVIEW(dev)) {
		/* VLV needs its own PLL and power-domain sequencing */
		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
	} else {
		intel_encoder->pre_enable = intel_pre_enable_dp;
		intel_encoder->enable = intel_enable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	intel_encoder->cloneable = false;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		/* connector init failed and has already cleaned itself up;
		 * only the encoder and the allocations remain to undo */
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}