/* intel_dp.c — source at revision 2293bb5c0383f522ac659946ccfadb0e6d2f03c5 */
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/export.h>
31#include <drm/drmP.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_edid.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38
39#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
40
41/**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct
44 *
45 * If a CPU or PCH DP output is attached to an eDP panel, this function
46 * will return true, and false otherwise.
47 */
48static bool is_edp(struct intel_dp *intel_dp)
49{
50	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
51
52	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
53}
54
55static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
56{
57	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
58
59	return intel_dig_port->base.base.dev;
60}
61
/* Look up the intel_dp hanging off the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
66
67static void intel_dp_link_down(struct intel_dp *intel_dp);
68
69static int
70intel_dp_max_link_bw(struct intel_dp *intel_dp)
71{
72	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
73
74	switch (max_link_bw) {
75	case DP_LINK_BW_1_62:
76	case DP_LINK_BW_2_7:
77		break;
78	default:
79		max_link_bw = DP_LINK_BW_1_62;
80		break;
81	}
82	return max_link_bw;
83}
84
85/*
86 * The units on the numbers in the next two are... bizarre.  Examples will
87 * make it clearer; this one parallels an example in the eDP spec.
88 *
89 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
90 *
91 *     270000 * 1 * 8 / 10 == 216000
92 *
93 * The actual data capacity of that configuration is 2.16Gbit/s, so the
94 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
95 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
96 * 119000.  At 18bpp that's 2142000 kilobits per second.
97 *
98 * Thus the strange-looking division by 10 in intel_dp_link_required, to
99 * get the result in decakilobits instead of kilobits.
100 */
101
/*
 * Bandwidth needed to carry @pixel_clock (kHz) at @bpp, rounded up to
 * whole decakilobits — see the units discussion above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbits = pixel_clock * bpp;

	return (kbits + 9) / 10;
}
107
/*
 * Usable payload bandwidth of a link: 8b/10b channel coding means only
 * 8 of every 10 bits on the wire carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return max_link_clock * max_lanes * 8 / 10;
}
113
114static int
115intel_dp_mode_valid(struct drm_connector *connector,
116		    struct drm_display_mode *mode)
117{
118	struct intel_dp *intel_dp = intel_attached_dp(connector);
119	struct intel_connector *intel_connector = to_intel_connector(connector);
120	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
121	int target_clock = mode->clock;
122	int max_rate, mode_rate, max_lanes, max_link_clock;
123
124	if (is_edp(intel_dp) && fixed_mode) {
125		if (mode->hdisplay > fixed_mode->hdisplay)
126			return MODE_PANEL;
127
128		if (mode->vdisplay > fixed_mode->vdisplay)
129			return MODE_PANEL;
130
131		target_clock = fixed_mode->clock;
132	}
133
134	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
135	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
136
137	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
138	mode_rate = intel_dp_link_required(target_clock, 18);
139
140	if (mode_rate > max_rate)
141		return MODE_CLOCK_HIGH;
142
143	if (mode->clock < 10000)
144		return MODE_CLOCK_LOW;
145
146	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
147		return MODE_H_ILLEGAL;
148
149	return MODE_OK;
150}
151
/*
 * Pack up to the first four bytes of @src into a big-endian u32 as the
 * AUX channel data registers expect (byte 0 in the most significant
 * position). Bytes beyond four are ignored.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t packed = 0;
	int n = src_bytes > 4 ? 4 : src_bytes;
	int shift = 24;

	while (n-- > 0) {
		packed |= ((uint32_t)*src++) << shift;
		shift -= 8;
	}
	return packed;
}
164
/*
 * Unpack a big-endian u32 read from an AUX data register into up to four
 * bytes of @dst (most significant byte first). Requests beyond four
 * bytes are clamped.
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int n = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
174
175/* hrawclock is 1/4 the FSB frequency */
176static int
177intel_hrawclk(struct drm_device *dev)
178{
179	struct drm_i915_private *dev_priv = dev->dev_private;
180	uint32_t clkcfg;
181
182	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
183	if (IS_VALLEYVIEW(dev))
184		return 200;
185
186	clkcfg = I915_READ(CLKCFG);
187	switch (clkcfg & CLKCFG_FSB_MASK) {
188	case CLKCFG_FSB_400:
189		return 100;
190	case CLKCFG_FSB_533:
191		return 133;
192	case CLKCFG_FSB_667:
193		return 166;
194	case CLKCFG_FSB_800:
195		return 200;
196	case CLKCFG_FSB_1067:
197		return 266;
198	case CLKCFG_FSB_1333:
199		return 333;
200	/* these two are just a guess; one of them might be right */
201	case CLKCFG_FSB_1600:
202	case CLKCFG_FSB_1600_ALT:
203		return 400;
204	default:
205		return 133;
206	}
207}
208
209static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
210{
211	struct drm_device *dev = intel_dp_to_dev(intel_dp);
212	struct drm_i915_private *dev_priv = dev->dev_private;
213	u32 pp_stat_reg;
214
215	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
216	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
217}
218
219static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
220{
221	struct drm_device *dev = intel_dp_to_dev(intel_dp);
222	struct drm_i915_private *dev_priv = dev->dev_private;
223	u32 pp_ctrl_reg;
224
225	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
226	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
227}
228
229static void
230intel_dp_check_edp(struct intel_dp *intel_dp)
231{
232	struct drm_device *dev = intel_dp_to_dev(intel_dp);
233	struct drm_i915_private *dev_priv = dev->dev_private;
234	u32 pp_stat_reg, pp_ctrl_reg;
235
236	if (!is_edp(intel_dp))
237		return;
238
239	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
240	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
241
242	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
243		WARN(1, "eDP powered off while attempting aux channel communication.\n");
244		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
245				I915_READ(pp_stat_reg),
246				I915_READ(pp_ctrl_reg));
247	}
248}
249
/*
 * Wait for the AUX channel to finish the transfer currently in flight,
 * either by sleeping on the gmbus wait queue (when the AUX-done irq is
 * usable) or by polling atomically. Returns the final status register
 * value; logs an error if the hardware never cleared SEND_BUSY.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

	/* C is true once SEND_BUSY drops; as a side effect it latches the
	 * most recent status register value into 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
273
/*
 * Perform one raw AUX channel transfer: clock out @send_bytes from @send,
 * then capture up to @recv_size reply bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative error
 * code (-EBUSY, -EIO, -ETIMEDOUT). Timeouts are expected when no sink is
 * connected, so they are only logged at debug level.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (IS_VALLEYVIEW(dev)) {
		aux_clock_divider = 100;
	} else if (intel_dig_port->port == PORT_A) {
		if (HAS_DDI(dev))
			aux_clock_divider = DIV_ROUND_CLOSEST(
				intel_ddi_get_cdclk_freq(dev_priv), 2000);
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		aux_clock_divider = 74;
	} else if (HAS_PCH_SPLIT(dev)) {
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	} else {
		aux_clock_divider = intel_hrawclk(dev) / 2;
	}

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		/* Retry on timeout/receive errors; stop once DONE is set. */
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Transfer finished: drop the latency constraint again. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}
417
418/* Write data to the aux channel in native mode */
419static int
420intel_dp_aux_native_write(struct intel_dp *intel_dp,
421			  uint16_t address, uint8_t *send, int send_bytes)
422{
423	int ret;
424	uint8_t	msg[20];
425	int msg_bytes;
426	uint8_t	ack;
427
428	intel_dp_check_edp(intel_dp);
429	if (send_bytes > 16)
430		return -1;
431	msg[0] = AUX_NATIVE_WRITE << 4;
432	msg[1] = address >> 8;
433	msg[2] = address & 0xff;
434	msg[3] = send_bytes - 1;
435	memcpy(&msg[4], send, send_bytes);
436	msg_bytes = send_bytes + 4;
437	for (;;) {
438		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
439		if (ret < 0)
440			return ret;
441		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
442			break;
443		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
444			udelay(100);
445		else
446			return -EIO;
447	}
448	return send_bytes;
449}
450
/* Write a single byte to the aux channel in native mode; convenience
 * wrapper around intel_dp_aux_native_write(). */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}
458
/*
 * Read bytes from a native aux channel. Returns the number of payload
 * bytes received, or a negative error code; retries indefinitely while
 * the sink replies with DEFER.
 */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];	/* NOTE(review): assumes recv_bytes <= 19 — confirm callers */
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Native AUX read header: command, 20-bit address, length - 1. */
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;	/* one extra byte for the reply code */

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			/* Strip the reply code; the payload follows it. */
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}
498
/*
 * Single-byte I2C-over-AUX transaction; installed as the aux_ch hook of
 * the i2c-algo-dp-aux adapter. @mode carries MODE_I2C_{READ,WRITE,STOP}
 * flags; for reads the received byte is stored in *read_byte.
 *
 * Returns the number of data bytes transferred on success, or a negative
 * error code. Defers (native or i2c) are retried up to 5 times.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) stays set until a STOP is requested. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* Address-only (start/stop) transaction: no length byte. */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		/* Native ACK received; now inspect the i2c-level reply. */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}
595
/*
 * Register the i2c-over-AUX adapter for this DP port, wiring the algo's
 * aux_ch hook to intel_dp_i2c_aux_ch(). VDD is forced on around
 * i2c_dp_aux_add_bus() — NOTE(review): presumably because bus
 * registration talks to the sink over AUX; confirm.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	/* strncpy does not guarantee termination; terminate explicitly. */
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}
620
621static void
622intel_dp_set_clock(struct intel_encoder *encoder,
623		   struct intel_crtc_config *pipe_config, int link_bw)
624{
625	struct drm_device *dev = encoder->base.dev;
626
627	if (IS_G4X(dev)) {
628		if (link_bw == DP_LINK_BW_1_62) {
629			pipe_config->dpll.p1 = 2;
630			pipe_config->dpll.p2 = 10;
631			pipe_config->dpll.n = 2;
632			pipe_config->dpll.m1 = 23;
633			pipe_config->dpll.m2 = 8;
634		} else {
635			pipe_config->dpll.p1 = 1;
636			pipe_config->dpll.p2 = 10;
637			pipe_config->dpll.n = 1;
638			pipe_config->dpll.m1 = 14;
639			pipe_config->dpll.m2 = 2;
640		}
641		pipe_config->clock_set = true;
642	} else if (IS_HASWELL(dev)) {
643		/* Haswell has special-purpose DP DDI clocks. */
644	} else if (HAS_PCH_SPLIT(dev)) {
645		if (link_bw == DP_LINK_BW_1_62) {
646			pipe_config->dpll.n = 1;
647			pipe_config->dpll.p1 = 2;
648			pipe_config->dpll.p2 = 10;
649			pipe_config->dpll.m1 = 12;
650			pipe_config->dpll.m2 = 9;
651		} else {
652			pipe_config->dpll.n = 2;
653			pipe_config->dpll.p1 = 1;
654			pipe_config->dpll.p2 = 10;
655			pipe_config->dpll.m1 = 14;
656			pipe_config->dpll.m2 = 8;
657		}
658		pipe_config->clock_set = true;
659	} else if (IS_VALLEYVIEW(dev)) {
660		/* FIXME: Need to figure out optimized DP clocks for vlv. */
661	}
662}
663
/*
 * Compute the DP link configuration (bpp, lane count, link rate, m/n
 * values) for the requested mode. Walks bpp downwards and, for each bpp,
 * tries every clock/lane combination until the required rate fits.
 * Returns false if no combination can carry the mode.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	/* index into bws[]: 1 when the sink supports 2.7 GHz, else 0 */
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	/* For eDP, force the panel's fixed mode and set up panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);

	/* From the requested bpp down to 18 (6bpc), in 2bpc steps. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

		for (clock = 0; clock <= max_clock; clock++) {
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	/* Record the winning combination. */
	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->clock, pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
764
765void intel_dp_init_link_config(struct intel_dp *intel_dp)
766{
767	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
768	intel_dp->link_configuration[0] = intel_dp->link_bw;
769	intel_dp->link_configuration[1] = intel_dp->lane_count;
770	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
771	/*
772	 * Check for DPCD version > 1.1 and enhanced framing support
773	 */
774	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
775	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
776		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
777	}
778}
779
/*
 * Select the CPU eDP PLL frequency in DP_A to match the computed port
 * clock (160 MHz for the 162000 kHz link, 270 MHz otherwise), and mirror
 * the chosen bits into intel_dp->DP.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
809
/*
 * Compute the port register value (intel_dp->DP) for the adjusted mode.
 * The value is only cached here, not written to the output register
 * (apart from the eDP PLL selection for CPU port A at the end).
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU eDP (port A) uses the CPT-style layout. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		/* CPT PCH: most config lives in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}
890
891#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
892#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
893
894#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
895#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
896
897#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
898#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
899
/*
 * Poll the panel power sequencer status register until
 * (status & @mask) == @value, giving up after 5 seconds. A timeout is
 * logged as an error but does not otherwise propagate.
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	/* VLV uses display-block PP registers; PCH platforms use the south
	 * display engine's. */
	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms total timeout, polling every 10 ms. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}
}
922
/* Block until the power sequencer reports the panel fully on and idle. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
928
/* Block until the power sequencer reports the panel fully off and idle. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
934
/* Block until the sequencer finishes the mandatory power-cycle delay,
 * so the panel may be powered up again. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
940
941
942/* Read the current pp_control value, unlocking the register if it
943 * is locked
944 */
945
946static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
947{
948	struct drm_device *dev = intel_dp_to_dev(intel_dp);
949	struct drm_i915_private *dev_priv = dev->dev_private;
950	u32 control;
951	u32 pp_ctrl_reg;
952
953	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
954	control = I915_READ(pp_ctrl_reg);
955
956	control &= ~PANEL_UNLOCK_MASK;
957	control |= PANEL_UNLOCK_REGS;
958	return control;
959}
960
/*
 * Force the panel VDD rail on so the AUX channel can be used before the
 * panel is fully powered. No-op for non-eDP ports. Must be balanced by a
 * later ironlake_edp_panel_vdd_off(); want_panel_vdd tracks the
 * outstanding request.
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Respect the mandatory power-cycle delay before re-powering. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}
1003
/*
 * Actually drop the VDD force bit — but only if nobody has re-requested
 * VDD in the meantime (want_panel_vdd) and the override is still active.
 * Caller must hold mode_config.mutex.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
		msleep(intel_dp->panel_power_down_delay);
	}
}
1029
1030static void ironlake_panel_vdd_work(struct work_struct *__work)
1031{
1032	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1033						 struct intel_dp, panel_vdd_work);
1034	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1035
1036	mutex_lock(&dev->mode_config.mutex);
1037	ironlake_panel_vdd_off_sync(intel_dp);
1038	mutex_unlock(&dev->mode_config.mutex);
1039}
1040
1041void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1042{
1043	if (!is_edp(intel_dp))
1044		return;
1045
1046	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1047	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1048
1049	intel_dp->want_panel_vdd = false;
1050
1051	if (sync) {
1052		ironlake_panel_vdd_off_sync(intel_dp);
1053	} else {
1054		/*
1055		 * Queue the timer to fire a long
1056		 * time from now (relative to the power down delay)
1057		 * to keep the panel power up across a sequence of operations
1058		 */
1059		schedule_delayed_work(&intel_dp->panel_vdd_work,
1060				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1061	}
1062}
1063
/*
 * Turn eDP panel power on and wait until the panel reports ready. On GEN5
 * (ILK) the panel reset bit is temporarily cleared around the power-up
 * sequence and restored afterwards as a hardware workaround.
 */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	/* Honour the panel's mandated off->on cycle time. */
	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	/* VLV uses the pipe A panel power block, PCH platforms the PCH one. */
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}
1108
/*
 * Turn eDP panel power off and wait for the power sequencer to settle.
 * Callers must still hold a VDD force; panel power and the VDD override
 * are dropped together in a single register write.
 */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	/* VLV uses the pipe A panel power block, PCH platforms the PCH one. */
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* VDD went away with the write above; keep the flag in sync. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}
1137
/*
 * Enable the eDP backlight, after the panel-mandated warm-up delay, and
 * then hand over to the generic panel backlight code.
 */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	/* VLV uses the pipe A panel power block, PCH platforms the PCH one. */
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(dev, pipe);
}
1168
/*
 * Disable the eDP backlight (generic panel code first, then the panel
 * power sequencer's BLC enable bit), and wait out the panel's required
 * backlight-off delay before returning.
 */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	/* VLV uses the pipe A panel power block, PCH platforms the PCH one. */
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	msleep(intel_dp->backlight_off_delay);
}
1191
/*
 * Enable the CPU eDP PLL (via DP_A). The pipe must be disabled; port and
 * audio enable bits are cleared here so only the PLL comes on.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Allow the PLL time to lock before anyone depends on it. */
	udelay(200);
}
1217
/*
 * Disable the CPU eDP PLL. The pipe must already be disabled. Operates on
 * the live DP_A value instead of intel_dp->DP so link-retraining state is
 * left untouched.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* Give the hardware time to settle after dropping the PLL. */
	udelay(200);
}
1242
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point; the SET_POWER register
	 * only exists from DPCD revision 1.1 onwards. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Going to sleep: a single write attempt is enough. */
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}
1272
/*
 * Read out whether this DP port is enabled and, if so, which pipe drives
 * it. Returns false only when the port enable bit is clear. On CPT the
 * pipe is recovered by scanning the per-pipe transcoder DP control
 * registers for the one selecting this port; note that the unmatched-port
 * and no-pipe-found fallbacks return true without assigning *pipe.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	/* IVB CPU eDP encodes the pipe CPT-style in the port register. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: the port register has no pipe field; find the
		 * transcoder that routes to this port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
1322
/*
 * Fill pipe_config from hardware state: sync polarity flags come from the
 * port register (non-CPT and port A) or the CPT transcoder DP control
 * register; for port A the port clock is derived from the eDP PLL
 * frequency select bit.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		/* CPT keeps the polarity bits in the transcoder register. */
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	/* eDP PLL runs at one of two fixed link rates. */
	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}
}
1366
1367static bool is_edp_psr(struct intel_dp *intel_dp)
1368{
1369	return is_edp(intel_dp) &&
1370		intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1371}
1372
/*
 * Disable hook: shut the panel down in the required order — backlight
 * first, then panel power — holding a VDD force throughout so the final
 * AUX/panel accesses still work.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	/* Keep the sink in D0 while shutting things down. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled;
	 * that case (and VLV) is handled by the post_disable hook. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
1390
1391static void intel_post_disable_dp(struct intel_encoder *encoder)
1392{
1393	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1394	enum port port = dp_to_dig_port(intel_dp)->port;
1395	struct drm_device *dev = encoder->base.dev;
1396
1397	if (port == PORT_A || IS_VALLEYVIEW(dev)) {
1398		intel_dp_link_down(intel_dp);
1399		if (!IS_VALLEYVIEW(dev))
1400			ironlake_edp_pll_off(intel_dp);
1401	}
1402}
1403
/*
 * Enable hook — ordering is critical: force VDD so AUX works, wake the
 * sink, start link training, bring full panel power up, then finish
 * training; the backlight comes on only after training completes.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* An already-enabled port here indicates broken state tracking. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		struct intel_digital_port *dport =
			enc_to_dig_port(&encoder->base);
		int channel = vlv_dport_to_channel(dport);

		/* Wait until the VLV PHY reports the port ready. */
		vlv_wait_port_ready(dev_priv, channel);
	}
}
1431
/*
 * Pre-enable hook: bring up the dedicated eDP PLL on non-VLV CPU eDP, or
 * program the VLV DPIO PHY data-lane/clock-buffer registers.
 */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_edp_pll_on(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(encoder->base.crtc);
		int port = vlv_dport_to_channel(dport);
		int pipe = intel_crtc->pipe;
		u32 val;

		/* NOTE(review): the value read here is immediately
		 * overwritten; presumably only the read's side effect (if
		 * any) matters — confirm before cleaning this up. */
		val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
		val = 0;
		/* Bit 21 selects which pipe feeds this channel. */
		if (pipe)
			val |= (1<<21);
		else
			val &= ~(1<<21);
		val |= 0x001000c4;
		vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);

		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
				 0x00760018);
		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
				 0x00400888);
	}
}
1464
/*
 * Pre-PLL-enable hook (VLV only): reset the PHY Tx lanes to defaults and
 * apply the inter-pair skew workaround before the DPLL comes up.
 */
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int port = vlv_dport_to_channel(dport);

	if (!IS_VALLEYVIEW(dev))
		return;

	/* Program Tx lane resets to default */
	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
}
1490
1491/*
1492 * Native read with retry for link status and receiver capability reads for
1493 * cases where the sink may still be asleep.
1494 */
1495static bool
1496intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1497			       uint8_t *recv, int recv_bytes)
1498{
1499	int ret, i;
1500
1501	/*
1502	 * Sinks are *supposed* to come up within 1ms from an off state,
1503	 * but we're also supposed to retry 3 times per the spec.
1504	 */
1505	for (i = 0; i < 3; i++) {
1506		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1507					       recv_bytes);
1508		if (ret == recv_bytes)
1509			return true;
1510		msleep(1);
1511	}
1512
1513	return false;
1514}
1515
1516/*
1517 * Fetch AUX CH registers 0x202 - 0x207 which contain
1518 * link status information
1519 */
1520static bool
1521intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1522{
1523	return intel_dp_aux_native_read_retry(intel_dp,
1524					      DP_LANE0_1_STATUS,
1525					      link_status,
1526					      DP_LINK_STATUS_SIZE);
1527}
1528
#if 0
/* Human-readable names for debug logging of training values; currently
 * compiled out. */
static char	*voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char	*pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char	*link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif
1540
1541/*
1542 * These are source-specific values; current Intel hardware supports
1543 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1544 */
1545
1546static uint8_t
1547intel_dp_voltage_max(struct intel_dp *intel_dp)
1548{
1549	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1550	enum port port = dp_to_dig_port(intel_dp)->port;
1551
1552	if (IS_VALLEYVIEW(dev))
1553		return DP_TRAIN_VOLTAGE_SWING_1200;
1554	else if (IS_GEN7(dev) && port == PORT_A)
1555		return DP_TRAIN_VOLTAGE_SWING_800;
1556	else if (HAS_PCH_CPT(dev) && port != PORT_A)
1557		return DP_TRAIN_VOLTAGE_SWING_1200;
1558	else
1559		return DP_TRAIN_VOLTAGE_SWING_800;
1560}
1561
1562static uint8_t
1563intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1564{
1565	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1566	enum port port = dp_to_dig_port(intel_dp)->port;
1567
1568	if (HAS_DDI(dev)) {
1569		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1570		case DP_TRAIN_VOLTAGE_SWING_400:
1571			return DP_TRAIN_PRE_EMPHASIS_9_5;
1572		case DP_TRAIN_VOLTAGE_SWING_600:
1573			return DP_TRAIN_PRE_EMPHASIS_6;
1574		case DP_TRAIN_VOLTAGE_SWING_800:
1575			return DP_TRAIN_PRE_EMPHASIS_3_5;
1576		case DP_TRAIN_VOLTAGE_SWING_1200:
1577		default:
1578			return DP_TRAIN_PRE_EMPHASIS_0;
1579		}
1580	} else if (IS_VALLEYVIEW(dev)) {
1581		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1582		case DP_TRAIN_VOLTAGE_SWING_400:
1583			return DP_TRAIN_PRE_EMPHASIS_9_5;
1584		case DP_TRAIN_VOLTAGE_SWING_600:
1585			return DP_TRAIN_PRE_EMPHASIS_6;
1586		case DP_TRAIN_VOLTAGE_SWING_800:
1587			return DP_TRAIN_PRE_EMPHASIS_3_5;
1588		case DP_TRAIN_VOLTAGE_SWING_1200:
1589		default:
1590			return DP_TRAIN_PRE_EMPHASIS_0;
1591		}
1592	} else if (IS_GEN7(dev) && port == PORT_A) {
1593		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1594		case DP_TRAIN_VOLTAGE_SWING_400:
1595			return DP_TRAIN_PRE_EMPHASIS_6;
1596		case DP_TRAIN_VOLTAGE_SWING_600:
1597		case DP_TRAIN_VOLTAGE_SWING_800:
1598			return DP_TRAIN_PRE_EMPHASIS_3_5;
1599		default:
1600			return DP_TRAIN_PRE_EMPHASIS_0;
1601		}
1602	} else {
1603		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1604		case DP_TRAIN_VOLTAGE_SWING_400:
1605			return DP_TRAIN_PRE_EMPHASIS_6;
1606		case DP_TRAIN_VOLTAGE_SWING_600:
1607			return DP_TRAIN_PRE_EMPHASIS_6;
1608		case DP_TRAIN_VOLTAGE_SWING_800:
1609			return DP_TRAIN_PRE_EMPHASIS_3_5;
1610		case DP_TRAIN_VOLTAGE_SWING_1200:
1611		default:
1612			return DP_TRAIN_PRE_EMPHASIS_0;
1613		}
1614	}
1615}
1616
/*
 * Program the VLV DPIO PHY with swing/pre-emphasis settings matching
 * train_set[0]. Returns 0 in all cases (including unsupported combos,
 * which skip the writes) — on VLV the levels live in the PHY, not the DP
 * port register, so the caller uses a zero mask.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	int port = vlv_dport_to_channel(dport);

	/* Pick the PHY magic values for the requested emphasis/swing pair. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Latch the new settings: OCALINIT low, program, then OCALINIT high. */
	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);

	return 0;
}
1711
1712static void
1713intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1714{
1715	uint8_t v = 0;
1716	uint8_t p = 0;
1717	int lane;
1718	uint8_t voltage_max;
1719	uint8_t preemph_max;
1720
1721	for (lane = 0; lane < intel_dp->lane_count; lane++) {
1722		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1723		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
1724
1725		if (this_v > v)
1726			v = this_v;
1727		if (this_p > p)
1728			p = this_p;
1729	}
1730
1731	voltage_max = intel_dp_voltage_max(intel_dp);
1732	if (v >= voltage_max)
1733		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1734
1735	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1736	if (p >= preemph_max)
1737		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1738
1739	for (lane = 0; lane < 4; lane++)
1740		intel_dp->train_set[lane] = v | p;
1741}
1742
1743static uint32_t
1744intel_gen4_signal_levels(uint8_t train_set)
1745{
1746	uint32_t	signal_levels = 0;
1747
1748	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1749	case DP_TRAIN_VOLTAGE_SWING_400:
1750	default:
1751		signal_levels |= DP_VOLTAGE_0_4;
1752		break;
1753	case DP_TRAIN_VOLTAGE_SWING_600:
1754		signal_levels |= DP_VOLTAGE_0_6;
1755		break;
1756	case DP_TRAIN_VOLTAGE_SWING_800:
1757		signal_levels |= DP_VOLTAGE_0_8;
1758		break;
1759	case DP_TRAIN_VOLTAGE_SWING_1200:
1760		signal_levels |= DP_VOLTAGE_1_2;
1761		break;
1762	}
1763	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1764	case DP_TRAIN_PRE_EMPHASIS_0:
1765	default:
1766		signal_levels |= DP_PRE_EMPHASIS_0;
1767		break;
1768	case DP_TRAIN_PRE_EMPHASIS_3_5:
1769		signal_levels |= DP_PRE_EMPHASIS_3_5;
1770		break;
1771	case DP_TRAIN_PRE_EMPHASIS_6:
1772		signal_levels |= DP_PRE_EMPHASIS_6;
1773		break;
1774	case DP_TRAIN_PRE_EMPHASIS_9_5:
1775		signal_levels |= DP_PRE_EMPHASIS_9_5;
1776		break;
1777	}
1778	return signal_levels;
1779}
1780
1781/* Gen6's DP voltage swing and pre-emphasis control */
1782static uint32_t
1783intel_gen6_edp_signal_levels(uint8_t train_set)
1784{
1785	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1786					 DP_TRAIN_PRE_EMPHASIS_MASK);
1787	switch (signal_levels) {
1788	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1789	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1790		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1791	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1792		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
1793	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1794	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1795		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
1796	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1797	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1798		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
1799	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1800	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1801		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
1802	default:
1803		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1804			      "0x%x\n", signal_levels);
1805		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1806	}
1807}
1808
1809/* Gen7's DP voltage swing and pre-emphasis control */
1810static uint32_t
1811intel_gen7_edp_signal_levels(uint8_t train_set)
1812{
1813	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1814					 DP_TRAIN_PRE_EMPHASIS_MASK);
1815	switch (signal_levels) {
1816	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1817		return EDP_LINK_TRAIN_400MV_0DB_IVB;
1818	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1819		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1820	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1821		return EDP_LINK_TRAIN_400MV_6DB_IVB;
1822
1823	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1824		return EDP_LINK_TRAIN_600MV_0DB_IVB;
1825	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1826		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1827
1828	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1829		return EDP_LINK_TRAIN_800MV_0DB_IVB;
1830	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1831		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1832
1833	default:
1834		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1835			      "0x%x\n", signal_levels);
1836		return EDP_LINK_TRAIN_500MV_0DB_IVB;
1837	}
1838}
1839
1840/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1841static uint32_t
1842intel_hsw_signal_levels(uint8_t train_set)
1843{
1844	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1845					 DP_TRAIN_PRE_EMPHASIS_MASK);
1846	switch (signal_levels) {
1847	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1848		return DDI_BUF_EMP_400MV_0DB_HSW;
1849	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1850		return DDI_BUF_EMP_400MV_3_5DB_HSW;
1851	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1852		return DDI_BUF_EMP_400MV_6DB_HSW;
1853	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
1854		return DDI_BUF_EMP_400MV_9_5DB_HSW;
1855
1856	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1857		return DDI_BUF_EMP_600MV_0DB_HSW;
1858	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1859		return DDI_BUF_EMP_600MV_3_5DB_HSW;
1860	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1861		return DDI_BUF_EMP_600MV_6DB_HSW;
1862
1863	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1864		return DDI_BUF_EMP_800MV_0DB_HSW;
1865	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1866		return DDI_BUF_EMP_800MV_3_5DB_HSW;
1867	default:
1868		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1869			      "0x%x\n", signal_levels);
1870		return DDI_BUF_EMP_400MV_0DB_HSW;
1871	}
1872}
1873
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	/* Platform checks are ordered most- to least-specific: DDI and VLV
	 * first, then gen7/gen6 CPU eDP, finally the gen4 fallback. */
	if (HAS_DDI(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_VALLEYVIEW(dev)) {
		/* VLV programs the PHY directly; nothing lands in *DP
		 * (returns 0, mask 0). */
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
1905
/*
 * Program the requested training pattern into the source (DDI DP_TP_CTL,
 * CPT port register, or legacy port register, depending on platform),
 * mirror it to the sink via DPCD TRAINING_PATTERN_SET, and — while
 * training is active — write the per-lane drive settings. Returns false
 * if the per-lane DPCD write comes up short.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;

	if (HAS_DDI(dev)) {
		/* DDI keeps the training pattern in DP_TP_CTL, not the port
		 * register. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT-style training bits in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy training bits in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Tell the sink which pattern to expect. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}
2001
/*
 * Switch the port's DP_TP_CTL into idle-pattern transmission after link
 * training. No-op on platforms without DDI, which have no DP_TP_CTL.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Request idle-pattern transmission in the link-train field. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* Other ports: wait (up to 1ms) for HW to ack the idle pattern. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
2032
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training: write the link configuration
 * to the sink, then loop adjusting voltage swing / pre-emphasis per the
 * sink's feedback until clock recovery succeeds or the retry budget
 * (5 same-voltage tries, 5 full restarts) is exhausted. The final port
 * register value is cached in intel_dp->DP for the caller.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	/* Start from zero swing/pre-emphasis on every lane. */
	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from zero levels. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	/* Cache the port value so the channel-eq phase continues from it. */
	intel_dp->DP = DP;
}
2116
/*
 * Channel-equalization phase of DP link training (training pattern 2).
 * Falls back to re-running clock recovery when it is lost, and gives up
 * after 5 clock-recovery restarts. Leaves the port transmitting the idle
 * pattern (on DDI) and caches the final port value in intel_dp->DP.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];

		/* Too many clock-recovery restarts: bring the link down. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2183
/*
 * Finish link training: program the "training disabled"/normal state both
 * in the source registers and in the sink's DP_TRAINING_PATTERN_SET.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
2189
/*
 * Bring the DP link down on non-DDI platforms: switch the port to the
 * idle training pattern and then disable it, applying the IBX
 * transcoder-select hardware workaround along the way.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the port into the idle training pattern first. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Finally disable audio and the port itself. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2269
/*
 * Read the sink's DPCD receiver-capability block plus PSR capabilities,
 * and for branch devices the per-port downstream info. Returns false when
 * the AUX transfer fails or the sink reports no DPCD at all.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	/* hex_dump_to_buffer emits 3 chars per byte ("xx "). */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
				       intel_dp->psr_dpcd,
				       sizeof(intel_dp->psr_dpcd));
	if (is_edp_psr(intel_dp))
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2307
/*
 * Read and log the sink and branch IEEE OUIs if the sink advertises OUI
 * support. Debug output only; has no functional effect on the link.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	/* AUX transfers to an eDP panel need panel VDD up. */
	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}
2328
2329static bool
2330intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2331{
2332	int ret;
2333
2334	ret = intel_dp_aux_native_read_retry(intel_dp,
2335					     DP_DEVICE_SERVICE_IRQ_VECTOR,
2336					     sink_irq_vector, 1);
2337	if (!ret)
2338		return false;
2339
2340	return true;
2341}
2342
/*
 * Respond to a sink automated-test request. Compliance testing is not
 * implemented, so every request is NAKed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2349
2350/*
2351 * According to DP spec
2352 * 5.1.2:
2353 *  1. Read DPCD
2354 *  2. Configure link according to Receiver Capabilities
2355 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
2356 *  4. Check link status on receipt of hot-plug interrupt
2357 */
2358
/*
 * Hotplug/IRQ handler for an active DP link: verify the link and sink are
 * still alive, service any sink IRQs, and retrain the link if channel
 * equalization has been lost. Only runs for active connectors.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if the link has dropped out of channel equalization. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
2406
2407/* XXX this is probably wrong for multiple downstream ports */
2408static enum drm_connector_status
2409intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2410{
2411	uint8_t *dpcd = intel_dp->dpcd;
2412	bool hpd;
2413	uint8_t type;
2414
2415	if (!intel_dp_get_dpcd(intel_dp))
2416		return connector_status_disconnected;
2417
2418	/* if there's no downstream port, we're done */
2419	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2420		return connector_status_connected;
2421
2422	/* If we're HPD-aware, SINK_COUNT changes dynamically */
2423	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2424	if (hpd) {
2425		uint8_t reg;
2426		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2427						    &reg, 1))
2428			return connector_status_unknown;
2429		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2430					      : connector_status_disconnected;
2431	}
2432
2433	/* If no HPD, poke DDC gently */
2434	if (drm_probe_ddc(&intel_dp->adapter))
2435		return connector_status_connected;
2436
2437	/* Well we tried, say unknown for unreliable port types */
2438	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2439	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2440		return connector_status_unknown;
2441
2442	/* Anything else is out of spec, warn and ignore */
2443	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2444	return connector_status_disconnected;
2445}
2446
2447static enum drm_connector_status
2448ironlake_dp_detect(struct intel_dp *intel_dp)
2449{
2450	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2451	struct drm_i915_private *dev_priv = dev->dev_private;
2452	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2453	enum drm_connector_status status;
2454
2455	/* Can't disconnect eDP, but you can close the lid... */
2456	if (is_edp(intel_dp)) {
2457		status = intel_panel_detect(dev);
2458		if (status == connector_status_unknown)
2459			status = connector_status_connected;
2460		return status;
2461	}
2462
2463	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2464		return connector_status_disconnected;
2465
2466	return intel_dp_detect_dpcd(intel_dp);
2467}
2468
2469static enum drm_connector_status
2470g4x_dp_detect(struct intel_dp *intel_dp)
2471{
2472	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2473	struct drm_i915_private *dev_priv = dev->dev_private;
2474	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2475	uint32_t bit;
2476
2477	/* Can't disconnect eDP, but you can close the lid... */
2478	if (is_edp(intel_dp)) {
2479		enum drm_connector_status status;
2480
2481		status = intel_panel_detect(dev);
2482		if (status == connector_status_unknown)
2483			status = connector_status_connected;
2484		return status;
2485	}
2486
2487	switch (intel_dig_port->port) {
2488	case PORT_B:
2489		bit = PORTB_HOTPLUG_LIVE_STATUS;
2490		break;
2491	case PORT_C:
2492		bit = PORTC_HOTPLUG_LIVE_STATUS;
2493		break;
2494	case PORT_D:
2495		bit = PORTD_HOTPLUG_LIVE_STATUS;
2496		break;
2497	default:
2498		return connector_status_unknown;
2499	}
2500
2501	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2502		return connector_status_disconnected;
2503
2504	return intel_dp_detect_dpcd(intel_dp);
2505}
2506
2507static struct edid *
2508intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2509{
2510	struct intel_connector *intel_connector = to_intel_connector(connector);
2511
2512	/* use cached edid if we have one */
2513	if (intel_connector->edid) {
2514		struct edid *edid;
2515		int size;
2516
2517		/* invalid edid */
2518		if (IS_ERR(intel_connector->edid))
2519			return NULL;
2520
2521		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2522		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
2523		if (!edid)
2524			return NULL;
2525
2526		return edid;
2527	}
2528
2529	return drm_get_edid(connector, adapter);
2530}
2531
2532static int
2533intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2534{
2535	struct intel_connector *intel_connector = to_intel_connector(connector);
2536
2537	/* use cached edid if we have one */
2538	if (intel_connector->edid) {
2539		/* invalid edid */
2540		if (IS_ERR(intel_connector->edid))
2541			return 0;
2542
2543		return intel_connector_update_modes(connector,
2544						    intel_connector->edid);
2545	}
2546
2547	return intel_ddc_get_modes(connector, adapter);
2548}
2549
/*
 * drm_connector .detect hook: probe hotplug/DPCD state via the platform
 * helper, and for a connected sink also refresh the OUI log and the
 * audio capability (forced or EDID-derived).
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Audio: honour a user override, else ask the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	return connector_status_connected;
}
2586
2587static int intel_dp_get_modes(struct drm_connector *connector)
2588{
2589	struct intel_dp *intel_dp = intel_attached_dp(connector);
2590	struct intel_connector *intel_connector = to_intel_connector(connector);
2591	struct drm_device *dev = connector->dev;
2592	int ret;
2593
2594	/* We should parse the EDID data and find out if it has an audio sink
2595	 */
2596
2597	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
2598	if (ret)
2599		return ret;
2600
2601	/* if eDP has no EDID, fall back to fixed mode */
2602	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2603		struct drm_display_mode *mode;
2604		mode = drm_mode_duplicate(dev,
2605					  intel_connector->panel.fixed_mode);
2606		if (mode) {
2607			drm_mode_probed_add(connector, mode);
2608			return 1;
2609		}
2610	}
2611	return 0;
2612}
2613
2614static bool
2615intel_dp_detect_audio(struct drm_connector *connector)
2616{
2617	struct intel_dp *intel_dp = intel_attached_dp(connector);
2618	struct edid *edid;
2619	bool has_audio = false;
2620
2621	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2622	if (edid) {
2623		has_audio = drm_detect_monitor_audio(edid);
2624		kfree(edid);
2625	}
2626
2627	return has_audio;
2628}
2629
/*
 * drm_connector .set_property hook: handles the force-audio,
 * broadcast-RGB and (eDP only) scaling-mode properties. A value that
 * actually changes state triggers a modeset restore so it takes effect
 * immediately; unchanged values return 0 without a modeset. Returns a
 * negative errno for unknown properties or invalid values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio capability from the EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new state with a full modeset if the pipe is live. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
2717
/*
 * drm_connector .destroy hook: free the cached EDID, tear down the eDP
 * panel state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* The cache may hold an ERR_PTR marker instead of a real EDID. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2735
/*
 * drm_encoder .destroy hook: remove the AUX i2c adapter, and for eDP make
 * sure no delayed VDD-off work is pending before forcing VDD off, then
 * free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* Cancel queued VDD-off work, then sync VDD off under the
		 * mode_config lock. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}
2752
/* Encoder helper: only mode_set is routed here; enable/disable go through
 * the intel_encoder hooks. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_set = intel_dp_mode_set,
};

/* Core connector operations for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};

/* Connector probing helpers (mode list, validation, encoder lookup). */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* Encoder teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
2774
/* Hotplug IRQ entry point: re-validate the link on a plug event. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}
2782
2783/* Return which DP Port should be selected for Transcoder DP control */
2784int
2785intel_trans_dp_port_sel(struct drm_crtc *crtc)
2786{
2787	struct drm_device *dev = crtc->dev;
2788	struct intel_encoder *intel_encoder;
2789	struct intel_dp *intel_dp;
2790
2791	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2792		intel_dp = enc_to_intel_dp(&intel_encoder->base);
2793
2794		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2795		    intel_encoder->type == INTEL_OUTPUT_EDP)
2796			return intel_dp->output_reg;
2797	}
2798
2799	return -1;
2800}
2801
2802/* check the VBT to see whether the eDP is on DP-D port */
2803bool intel_dpd_is_edp(struct drm_device *dev)
2804{
2805	struct drm_i915_private *dev_priv = dev->dev_private;
2806	struct child_device_config *p_child;
2807	int i;
2808
2809	if (!dev_priv->vbt.child_dev_num)
2810		return false;
2811
2812	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
2813		p_child = dev_priv->vbt.child_dev + i;
2814
2815		if (p_child->dvo_port == PORT_IDPD &&
2816		    p_child->device_type == DEVICE_TYPE_eDP)
2817			return true;
2818	}
2819	return false;
2820}
2821
/*
 * Attach the DP connector properties: force-audio, broadcast-RGB, and for
 * eDP the panel scaling mode (defaulting to aspect-preserving scaling).
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
2840
/*
 * Compute the eDP panel power-sequencing delays by merging the values
 * currently in the hardware registers, the VBT, and the eDP spec limits
 * (max of register and VBT per field, spec value when both are zero).
 * Fills intel_dp's delay fields and, when non-NULL, *out with the merged
 * sequence for later writing back to the registers.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	/* PCH platforms and others use different PP register blocks. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_control_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		pp_control_reg = PIPEA_PP_CONTROL;
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_control_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100usec hw units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
2938
/*
 * Program the merged panel power sequence (from
 * intel_dp_init_panel_power_sequencer) into the PP_ON/PP_OFF/PP_DIVISOR
 * registers, including the platform-specific port-select bits.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	/* Same register-block split as when reading the sequence out. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		/* Preserve whatever port-select bits the BIOS programmed. */
		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_POWER_PORT_DP_A;
		else
			port_sel = PANEL_POWER_PORT_DP_D;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
2992
/*
 * intel_edp_init_connector - eDP-specific part of DP connector setup
 *
 * @intel_dp: the DP encoder being initialized
 * @intel_connector: the connector that was just registered for it
 *
 * For non-eDP ports this is a no-op that returns true.  For eDP it:
 *  1. reads the panel power sequencing delays,
 *  2. caches the DPCD under forced panel VDD (AUX needs VDD with the
 *     panel otherwise off),
 *  3. programs the power sequencer registers once the sink is known to
 *     be real,
 *  4. caches the EDID and picks the panel's fixed mode from it, falling
 *     back to the VBT mode,
 *  5. initializes the panel/backlight helpers.
 *
 * Returns false if the DPCD read fails — the "panel" is presumed to be
 * a ghost and the caller should tear the connector down again.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	if (!is_edp(intel_dp))
		return true;

	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	/* Cache DPCD and EDID for edp. */
	ironlake_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	/* second arg false: presumably defers the actual VDD off rather
	 * than forcing it synchronously — confirm against
	 * ironlake_edp_panel_vdd_off(). */
	ironlake_edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		/* DPCD 1.1+ sinks advertise no-AUX-handshake link training
		 * via the MAX_DOWNSPREAD register. */
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	ironlake_edp_panel_vdd_on(intel_dp);
	edid = drm_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID read OK but no usable modes: cache an error
			 * sentinel instead of the raw EDID. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cached value is either a valid EDID or an ERR_PTR marking why
	 * none is available. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}

	ironlake_edp_panel_vdd_off(intel_dp, false);

	/* fixed_mode may legitimately be NULL here; panel init handles it. */
	intel_panel_init(&intel_connector->panel, fixed_mode);
	intel_panel_setup_backlight(connector);

	return true;
}
3070
/*
 * intel_dp_init_connector - register the DRM connector for a DP/eDP port
 *
 * @intel_dig_port: the digital port (already allocated by the caller)
 * @intel_connector: the connector to initialize and register
 *
 * Determines whether the port drives an external DP sink or an eDP
 * panel, registers the connector with DRM, sets up the AUX channel
 * register, hotplug pin and DDC/i2c bus, and runs the eDP-specific
 * probe.  Returns false (after tearing down everything registered here)
 * if the eDP probe decides the panel is a ghost; true otherwise.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type, error;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	/* Decide DP vs. eDP connector type per port/platform. */
	type = DRM_MODE_CONNECTOR_DisplayPort;
	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	switch (port) {
	case PORT_A:
		type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_C:
		if (IS_VALLEYVIEW(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_D:
		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	default:	/* silence GCC warning */
		break;
	}

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred VDD-off work; cancelled on the failure path below. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Default AUX control register is at a fixed offset from the
	 * output register; DDI platforms use per-port registers instead. */
	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
	     error, port_name(port));

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Ghost eDP panel: unwind everything registered above. */
		i2c_del_adapter(&intel_dp->adapter);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/* vdd_off_sync expects mode_config.mutex held —
			 * presumably for panel power state serialization;
			 * confirm against ironlake_panel_vdd_off_sync(). */
			mutex_lock(&dev->mode_config.mutex);
			ironlake_panel_vdd_off_sync(intel_dp);
			mutex_unlock(&dev->mode_config.mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
3210
3211void
3212intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3213{
3214	struct intel_digital_port *intel_dig_port;
3215	struct intel_encoder *intel_encoder;
3216	struct drm_encoder *encoder;
3217	struct intel_connector *intel_connector;
3218
3219	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
3220	if (!intel_dig_port)
3221		return;
3222
3223	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
3224	if (!intel_connector) {
3225		kfree(intel_dig_port);
3226		return;
3227	}
3228
3229	intel_encoder = &intel_dig_port->base;
3230	encoder = &intel_encoder->base;
3231
3232	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3233			 DRM_MODE_ENCODER_TMDS);
3234	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
3235
3236	intel_encoder->compute_config = intel_dp_compute_config;
3237	intel_encoder->enable = intel_enable_dp;
3238	intel_encoder->pre_enable = intel_pre_enable_dp;
3239	intel_encoder->disable = intel_disable_dp;
3240	intel_encoder->post_disable = intel_post_disable_dp;
3241	intel_encoder->get_hw_state = intel_dp_get_hw_state;
3242	intel_encoder->get_config = intel_dp_get_config;
3243	if (IS_VALLEYVIEW(dev))
3244		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
3245
3246	intel_dig_port->port = port;
3247	intel_dig_port->dp.output_reg = output_reg;
3248
3249	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3250	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3251	intel_encoder->cloneable = false;
3252	intel_encoder->hot_plug = intel_dp_hot_plug;
3253
3254	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
3255		drm_encoder_cleanup(encoder);
3256		kfree(intel_dig_port);
3257		kfree(intel_connector);
3258	}
3259}
3260