intel_dp.c revision 44f37d1f528a5b7c4703e77a710c7fa8a0e452f9
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/export.h>
31#include <drm/drmP.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_edid.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38
39#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
40
/* Pairs a DP link bandwidth code with the DPLL divider values that
 * produce the corresponding link clock on a given platform. */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code */
	struct dpll dpll;	/* divider settings for that link rate */
};
45
/* DPLL dividers for the two DP link rates on gen4 (G4X) parts. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
52
/* DPLL dividers for the two DP link rates on PCH-split platforms. */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
59
/* DPLL dividers for the two DP link rates on Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
66
/*
 * CHV supports eDP 1.4, which has additional link rates.
 * Only the fixed link rates are provided below; the variable
 * (intermediate) rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 * i.e. the low 22 bits hold the fractional part of m2.
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
84
85/**
86 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
87 * @intel_dp: DP struct
88 *
89 * If a CPU or PCH DP output is attached to an eDP panel, this function
90 * will return true, and false otherwise.
91 */
92static bool is_edp(struct intel_dp *intel_dp)
93{
94	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
95
96	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
97}
98
99static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
100{
101	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103	return intel_dig_port->base.base.dev;
104}
105
106static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
107{
108	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
109}
110
111static void intel_dp_link_down(struct intel_dp *intel_dp);
112static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
113static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
114
115static int
116intel_dp_max_link_bw(struct intel_dp *intel_dp)
117{
118	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
119	struct drm_device *dev = intel_dp->attached_connector->base.dev;
120
121	switch (max_link_bw) {
122	case DP_LINK_BW_1_62:
123	case DP_LINK_BW_2_7:
124		break;
125	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
126		if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
127		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
128			max_link_bw = DP_LINK_BW_5_4;
129		else
130			max_link_bw = DP_LINK_BW_2_7;
131		break;
132	default:
133		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
134		     max_link_bw);
135		max_link_bw = DP_LINK_BW_1_62;
136		break;
137	}
138	return max_link_bw;
139}
140
141/*
142 * The units on the numbers in the next two are... bizarre.  Examples will
143 * make it clearer; this one parallels an example in the eDP spec.
144 *
145 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
146 *
147 *     270000 * 1 * 8 / 10 == 216000
148 *
149 * The actual data capacity of that configuration is 2.16Gbit/s, so the
150 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
151 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
152 * 119000.  At 18bpp that's 2142000 kilobits per second.
153 *
154 * Thus the strange-looking division by 10 in intel_dp_link_required, to
155 * get the result in decakilobits instead of kilobits.
156 */
157
/*
 * Bandwidth needed by a mode: pixel_clock (kHz) * bpp gives kilobits/s;
 * divide by 10, rounding up, to get decakilobits/s so the result
 * compares directly against intel_dp_max_data_rate().
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;
	int decakilobits = kilobits / 10;

	if (kilobits % 10)
		decakilobits++;

	return decakilobits;
}
163
/*
 * Usable data rate of a link: 8b/10b channel coding means only 8 of
 * every 10 link bits carry payload.  Units are decakilobits/s, matching
 * intel_dp_link_required().
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total_link_rate = max_link_clock * max_lanes;

	return total_link_rate * 8 / 10;
}
169
/*
 * Connector ->mode_valid hook: reject modes the panel or the DP link
 * cannot carry.  Bandwidth is checked at the minimum bpp this driver
 * will ever pick (6 bpc / 18 bpp, see intel_dp_compute_config()).
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	/* eDP panels can't exceed their fixed native timings. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The link always runs at the panel's native clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	/* 10 MHz is the lower pixel clock bound accepted here. */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
207
/*
 * Pack up to 4 bytes of an AUX message into one 32-bit data-register
 * word, most significant byte first.  Extra source bytes are ignored.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);

	return v;
}
220
/*
 * Mirror of pack_aux(): split one 32-bit AUX data-register word back
 * into up to 4 bytes, most significant byte first.
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
230
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	/* Decode the CLKCFG FSB field into a raw clock in MHz. */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: fall back to the 533 MHz FSB value. */
		return 133;
	}
}
264
265static void
266intel_dp_init_panel_power_sequencer(struct drm_device *dev,
267				    struct intel_dp *intel_dp,
268				    struct edp_power_seq *out);
269static void
270intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
271					      struct intel_dp *intel_dp,
272					      struct edp_power_seq *out);
273
/*
 * Find which pipe's panel power sequencer drives this DP port on VLV:
 * use the attached crtc's pipe when there is one, otherwise probe the
 * per-pipe PP_ON_DELAYS port-select bits, falling back to pipe A.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	enum pipe pipe;

	/* modeset should have pipe */
	if (crtc)
		return to_intel_crtc(crtc)->pipe;

	/* init time, try to find a pipe with this port selected */
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
			return pipe;
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
			return pipe;
	}

	/* shrug */
	return PIPE_A;
}
301
302static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
303{
304	struct drm_device *dev = intel_dp_to_dev(intel_dp);
305
306	if (HAS_PCH_SPLIT(dev))
307		return PCH_PP_CONTROL;
308	else
309		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
310}
311
312static u32 _pp_stat_reg(struct intel_dp *intel_dp)
313{
314	struct drm_device *dev = intel_dp_to_dev(intel_dp);
315
316	if (HAS_PCH_SPLIT(dev))
317		return PCH_PP_STATUS;
318	else
319		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
320}
321
322static bool edp_have_panel_power(struct intel_dp *intel_dp)
323{
324	struct drm_device *dev = intel_dp_to_dev(intel_dp);
325	struct drm_i915_private *dev_priv = dev->dev_private;
326
327	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
328}
329
/*
 * True when the VDD force-override bit is set in the panel power
 * control register.  The port's power domain is checked first;
 * NOTE(review): presumably this avoids reading the PP register while
 * the relevant power well is down — confirm against platform docs.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;

	power_domain = intel_display_port_power_domain(intel_encoder);
	return intel_display_power_enabled(dev_priv, power_domain) &&
	       (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
342
/*
 * Sanity check before AUX channel access: on eDP, warn if neither
 * panel power nor the VDD override is up, since the sink cannot
 * respond to AUX transactions in that state.  No-op for non-eDP.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
359
/*
 * Wait for the in-flight AUX transaction to finish (SEND_BUSY to
 * clear) and return the final channel status register value.  Uses the
 * AUX completion interrupt when available, otherwise polls atomically.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* The condition macro deliberately latches the latest register value
 * into 'status' as a side effect of each evaluation. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
383
384static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
385{
386	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
387	struct drm_device *dev = intel_dig_port->base.base.dev;
388
389	/*
390	 * The clock divider is based off the hrawclk, and would like to run at
391	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
392	 */
393	return index ? 0 : intel_hrawclk(dev) / 2;
394}
395
396static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
397{
398	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
399	struct drm_device *dev = intel_dig_port->base.base.dev;
400
401	if (index)
402		return 0;
403
404	if (intel_dig_port->port == PORT_A) {
405		if (IS_GEN6(dev) || IS_GEN7(dev))
406			return 200; /* SNB & IVB eDP input clock at 400Mhz */
407		else
408			return 225; /* eDP input clock at 450Mhz */
409	} else {
410		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
411	}
412}
413
/*
 * AUX bit-clock divider for HSW/DDI hardware.  Port A (eDP) runs off
 * the CD clock; on non-ULT LPT a small workaround table of dividers is
 * tried in order; everything else derives from the PCH raw clock.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
435
/* VLV uses a single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index != 0)
		return 0;

	return 100;
}
440
/*
 * Build the AUX_CH_CTL value that kicks off a send of @send_bytes:
 * busy/done/irq flags plus platform-tuned precharge count, timeout and
 * the supplied bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* Number of 2us precharge pulses before the transaction. */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW's eDP (port A) AUX channel uses a longer 600us timeout. */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
470
/*
 * Perform one raw AUX channel transaction: transmit @send_bytes from
 * @send, then read back up to @recv_size bytes into @recv.  Retries
 * across the platform's clock dividers and (per spec) several attempts
 * each.  Returns the number of bytes received or a negative errno.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	/* eDP AUX needs panel VDD; remember whether we enabled it here so
	 * the reference can be dropped again on exit. */
	vdd = _edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Walk the platform's divider list until it is exhausted (0). */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	return ret;
}
599
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux ->transfer hook: translate a drm_dp_aux_msg into the raw
 * byte stream expected by intel_dp_aux_ch() and decode the reply.
 * Returns the payload size on success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte AUX header: request type, address, length-1. */
	txbuf[0] = msg->request << 4;
	txbuf[1] = msg->address >> 8;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-sized write is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 1;

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			/* Return payload size. */
			ret = msg->size;
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply command in front of the data. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
664
/*
 * Set up the AUX channel for this DP port: pick the control register
 * by port, register the drm_dp_aux i2c bus and create a sysfs link
 * from the connector to the i2c adapter.  Errors are logged but not
 * propagated.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/* On non-DDI platforms the AUX ctl reg sits right after output_reg. */
	if (!HAS_DDI(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the ddc adapter under the connector's sysfs directory;
	 * removed again in intel_dp_connector_unregister(). */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
	}
}
720
721static void
722intel_dp_connector_unregister(struct intel_connector *intel_connector)
723{
724	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
725
726	sysfs_remove_link(&intel_connector->base.kdev->kobj,
727			  intel_dp->aux.ddc.dev.kobj.name);
728	intel_connector_unregister(intel_connector);
729}
730
731static void
732intel_dp_set_clock(struct intel_encoder *encoder,
733		   struct intel_crtc_config *pipe_config, int link_bw)
734{
735	struct drm_device *dev = encoder->base.dev;
736	const struct dp_link_dpll *divisor = NULL;
737	int i, count = 0;
738
739	if (IS_G4X(dev)) {
740		divisor = gen4_dpll;
741		count = ARRAY_SIZE(gen4_dpll);
742	} else if (IS_HASWELL(dev)) {
743		/* Haswell has special-purpose DP DDI clocks. */
744	} else if (HAS_PCH_SPLIT(dev)) {
745		divisor = pch_dpll;
746		count = ARRAY_SIZE(pch_dpll);
747	} else if (IS_CHERRYVIEW(dev)) {
748		divisor = chv_dpll;
749		count = ARRAY_SIZE(chv_dpll);
750	} else if (IS_VALLEYVIEW(dev)) {
751		divisor = vlv_dpll;
752		count = ARRAY_SIZE(vlv_dpll);
753	}
754
755	if (divisor && count) {
756		for (i = 0; i < count; i++) {
757			if (link_bw == divisor[i].link_bw) {
758				pipe_config->dpll = divisor[i].dpll;
759				pipe_config->clock_set = true;
760				break;
761			}
762		}
763	}
764}
765
766static void
767intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
768{
769	struct drm_device *dev = crtc->base.dev;
770	struct drm_i915_private *dev_priv = dev->dev_private;
771	enum transcoder transcoder = crtc->config.cpu_transcoder;
772
773	I915_WRITE(PIPE_DATA_M2(transcoder),
774		TU_SIZE(m_n->tu) | m_n->gmch_m);
775	I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
776	I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
777	I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
778}
779
/*
 * Encoder ->compute_config hook for DP.  Searches, from highest bpp
 * down and from fewest lanes / lowest link rate up, for the first
 * lane-count/link-rate/bpp combination with enough bandwidth for the
 * adjusted mode, then fills in link parameters, M/N values and (where
 * applicable) fixed DPLL dividers.  Returns false if no combination
 * fits or the mode is unusable.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
	int bpp, mode_rate;
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	/* eDP: force the panel's fixed mode and set up panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    dev_priv->vbt.edp_bpp < bpp) {
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
			      dev_priv->vbt.edp_bpp);
		bpp = dev_priv->vbt.edp_bpp;
	}

	/* Search down to 6 bpc (18 bpp), preferring higher bpp. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			for (clock = 0; clock <= max_clock; clock++) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Also precompute M2/N2 for the downclocked mode when DRRS is
	 * supported; programmed via intel_dp_set_m2_n2(). */
	if (intel_connector->panel.downclock_mode != NULL &&
		intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
896
/*
 * Program the CPU eDP PLL frequency select in DP_A for the configured
 * port clock (162 MHz link -> "160MHz" bit, otherwise 270 MHz), and
 * mirror the chosen bit into the cached DP register value.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);	/* let the PLL frequency change settle */
}
926
/*
 * Encoder ->mode_set hook: assemble the cached DP port register value
 * (intel_dp->DP) for the configured link, handling the per-generation
 * register layout differences described below, and set up the CPU eDP
 * PLL where needed.  The value is written to hardware later.
 */
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style training bits, pipe select in 30:29 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX/CPU layout: color range, sync polarity, pipe select */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			/* CHV has a wider pipe-select field (3 pipes). */
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT PCH: everything else lives in TRANS_DP_CTL. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}
1008
1009#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1010#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1011
1012#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1013#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1014
1015#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1016#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1017
1018static void wait_panel_status(struct intel_dp *intel_dp,
1019				       u32 mask,
1020				       u32 value)
1021{
1022	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1023	struct drm_i915_private *dev_priv = dev->dev_private;
1024	u32 pp_stat_reg, pp_ctrl_reg;
1025
1026	pp_stat_reg = _pp_stat_reg(intel_dp);
1027	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1028
1029	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1030			mask, value,
1031			I915_READ(pp_stat_reg),
1032			I915_READ(pp_ctrl_reg));
1033
1034	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1035		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1036				I915_READ(pp_stat_reg),
1037				I915_READ(pp_ctrl_reg));
1038	}
1039
1040	DRM_DEBUG_KMS("Wait complete\n");
1041}
1042
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1048
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1054
/*
 * Honour the panel's minimum power-cycle (off) time before it may be
 * powered back on: first the software-tracked delay since the last
 * power-down, then whatever the hardware sequencer still reports.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1066
/* Enforce the panel's power-on -> backlight-on delay (avoids flicker). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1072
/* Enforce the panel's backlight-off -> power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1078
1079/* Read the current pp_control value, unlocking the register if it
1080 * is locked
1081 */
1082
1083static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1084{
1085	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1086	struct drm_i915_private *dev_priv = dev->dev_private;
1087	u32 control;
1088
1089	control = I915_READ(_pp_ctrl_reg(intel_dp));
1090	control &= ~PANEL_UNLOCK_MASK;
1091	control |= PANEL_UNLOCK_REGS;
1092	return control;
1093}
1094
/*
 * Force panel VDD on so the AUX channel and panel logic are powered even
 * while the panel itself is off.  Takes a display power domain reference
 * on the first request.
 *
 * Returns true if the caller enabled VDD (and thus owes a matching
 * edp_panel_vdd_off()), false if VDD was already requested or this is
 * not an eDP port.
 */
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	if (!is_edp(intel_dp))
		return false;

	intel_dp->want_panel_vdd = true;

	/* Hardware already has VDD forced on - nothing more to do. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	/* Respect the panel's minimum off time before re-powering. */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1142
1143void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1144{
1145	if (is_edp(intel_dp)) {
1146		bool vdd = _edp_panel_vdd_on(intel_dp);
1147
1148		WARN(!vdd, "eDP VDD already requested on\n");
1149	}
1150}
1151
1152static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1153{
1154	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1155	struct drm_i915_private *dev_priv = dev->dev_private;
1156	u32 pp;
1157	u32 pp_stat_reg, pp_ctrl_reg;
1158
1159	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1160
1161	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
1162		struct intel_digital_port *intel_dig_port =
1163						dp_to_dig_port(intel_dp);
1164		struct intel_encoder *intel_encoder = &intel_dig_port->base;
1165		enum intel_display_power_domain power_domain;
1166
1167		DRM_DEBUG_KMS("Turning eDP VDD off\n");
1168
1169		pp = ironlake_get_pp_control(intel_dp);
1170		pp &= ~EDP_FORCE_VDD;
1171
1172		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1173		pp_stat_reg = _pp_stat_reg(intel_dp);
1174
1175		I915_WRITE(pp_ctrl_reg, pp);
1176		POSTING_READ(pp_ctrl_reg);
1177
1178		/* Make sure sequencer is idle before allowing subsequent activity */
1179		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1180		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1181
1182		if ((pp & POWER_TARGET_ON) == 0)
1183			intel_dp->last_power_cycle = jiffies;
1184
1185		power_domain = intel_display_port_power_domain(intel_encoder);
1186		intel_display_power_put(dev_priv, power_domain);
1187	}
1188}
1189
/*
 * Delayed-work handler scheduled by edp_panel_vdd_off(sync=false): turn
 * VDD off some time after the last user released it, under the
 * mode_config mutex that edp_panel_vdd_off_sync() requires.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edp_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}
1200
/*
 * Release a VDD request taken via intel_edp_panel_vdd_on().  With
 * @sync the power-down happens immediately; otherwise it is deferred
 * via delayed work so that a burst of AUX transactions doesn't bounce
 * VDD on and off.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	/* Clear the request flag before any power-down path runs. */
	intel_dp->want_panel_vdd = false;

	if (sync) {
		edp_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}
1222
/*
 * Power the eDP panel on via the PCH/CPU panel power sequencer, honouring
 * the panel's power-cycle delay first.  No-op for non-eDP ports or if
 * the panel is already powered.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	/* Respect the panel's minimum off time before re-powering. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay bookkeeping. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1267
/*
 * Power the eDP panel off.  The caller must hold a VDD reference
 * (want_panel_vdd), which this function consumes: both panel power and
 * the VDD override are cleared in a single register write, and the
 * corresponding power domain reference is released at the end.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	/* Honour the backlight-off -> panel-off delay first. */
	edp_wait_backlight_off(intel_dp);

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Start of the minimum off period for wait_panel_power_cycle(). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1307
/*
 * Enable the eDP backlight: set the BLC enable bit in the panel power
 * sequencer and then turn on the PWM backlight, after waiting out the
 * panel's power-on -> backlight-on delay.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(intel_dp->attached_connector);
}
1337
/*
 * Disable the eDP backlight: turn off the PWM backlight first, then
 * clear the BLC enable bit in the panel power sequencer, and record the
 * time for the backlight-off -> panel-off delay.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(intel_dp->attached_connector);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	/* Timestamp for edp_wait_backlight_off(). */
	intel_dp->last_backlight_off = jiffies;
}
1360
/*
 * Enable the CPU eDP PLL (DP_A).  The pipe must already be disabled;
 * the port and audio enable bits are cleared in the shadow DP value so
 * the write arms only the PLL.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Let the PLL spin up before anyone relies on it. */
	udelay(200);
}
1386
/*
 * Disable the CPU eDP PLL (DP_A).  The pipe and port must already be
 * off.  Works on the live register value rather than intel_dp->DP (see
 * comment below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* Allow the PLL to wind down. */
	udelay(200);
}
1411
/*
 * If the sink supports it (DPCD rev >= 1.1), set the sink power state
 * via the DP_SET_POWER DPCD register: D3 for any non-ON DPMS mode, D0
 * for ON.  Write failures are only logged.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, retry up to 3 times with a 1 ms sleep
		 * between attempts to give the sink time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}
1440
1441static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1442				  enum pipe *pipe)
1443{
1444	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1445	enum port port = dp_to_dig_port(intel_dp)->port;
1446	struct drm_device *dev = encoder->base.dev;
1447	struct drm_i915_private *dev_priv = dev->dev_private;
1448	enum intel_display_power_domain power_domain;
1449	u32 tmp;
1450
1451	power_domain = intel_display_port_power_domain(encoder);
1452	if (!intel_display_power_enabled(dev_priv, power_domain))
1453		return false;
1454
1455	tmp = I915_READ(intel_dp->output_reg);
1456
1457	if (!(tmp & DP_PORT_EN))
1458		return false;
1459
1460	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1461		*pipe = PORT_TO_PIPE_CPT(tmp);
1462	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1463		*pipe = PORT_TO_PIPE(tmp);
1464	} else {
1465		u32 trans_sel;
1466		u32 trans_dp;
1467		int i;
1468
1469		switch (intel_dp->output_reg) {
1470		case PCH_DP_B:
1471			trans_sel = TRANS_DP_PORT_SEL_B;
1472			break;
1473		case PCH_DP_C:
1474			trans_sel = TRANS_DP_PORT_SEL_C;
1475			break;
1476		case PCH_DP_D:
1477			trans_sel = TRANS_DP_PORT_SEL_D;
1478			break;
1479		default:
1480			return true;
1481		}
1482
1483		for_each_pipe(i) {
1484			trans_dp = I915_READ(TRANS_DP_CTL(i));
1485			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1486				*pipe = i;
1487				return true;
1488			}
1489		}
1490
1491		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
1492			      intel_dp->output_reg);
1493	}
1494
1495	return true;
1496}
1497
/*
 * Read back the pipe configuration (sync polarity, port clock, dotclock)
 * from the DP/transcoder registers into pipe_config.  Also applies the
 * VBT eDP bpp fixup for UEFI-boot machines (see the comment below).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	/* Sync polarity lives in the port register on non-CPT (and CPU
	 * eDP), in the transcoder DP control register on CPT. */
	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP: derive the link clock from the DP_A PLL frequency. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
1574
/* True if the attached eDP sink advertised PSR support in its DPCD. */
static bool is_edp_psr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->psr.sink_support;
}
1581
1582static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1583{
1584	struct drm_i915_private *dev_priv = dev->dev_private;
1585
1586	if (!HAS_PSR(dev))
1587		return false;
1588
1589	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1590}
1591
/*
 * Program the PSR VSC SDP into the pipe's video DIP data registers,
 * zero-padding the buffer past the packet, then (re-)enable VSC DIP
 * transmission.  The DIP must be disabled while its data buffer is
 * rewritten (see BSpec note below).
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	/* Copy the packet a dword at a time; zero the remainder. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
1620
/*
 * One-time PSR setup: send the PSR VSC SDP header and mask the debug
 * events that would otherwise cause continuous PSR exits.  Guarded by
 * psr_setup_done so it runs only once per intel_dp.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	intel_dp->psr_setup_done = true;
}
1644
/*
 * Enable PSR on the sink via DPCD (with main-link-active chosen from
 * the sink's no-train-on-exit capability) and program the hardware AUX
 * registers used to send the PSR entry command autonomously.
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	int msg_size = 5;       /* Header(4) + Message(1) */

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
1672
/*
 * Enable PSR on the source side: program the PSR control register with
 * link standby/disable mode (based on the sink's no-train-on-exit
 * capability), timing fields, and the enable bit.
 */
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	/* Enter PSR after a single idle frame. */
	uint32_t idle_frames = 1;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	/* BDW has no link-entry-time field, hence the conditional. */
	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
1696
1697static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1698{
1699	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1700	struct drm_device *dev = dig_port->base.base.dev;
1701	struct drm_i915_private *dev_priv = dev->dev_private;
1702	struct drm_crtc *crtc = dig_port->base.base.crtc;
1703	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1704	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1705	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1706
1707	dev_priv->psr.source_ok = false;
1708
1709	if (!HAS_PSR(dev)) {
1710		DRM_DEBUG_KMS("PSR not supported on this platform\n");
1711		return false;
1712	}
1713
1714	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1715	    (dig_port->port != PORT_A)) {
1716		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1717		return false;
1718	}
1719
1720	if (!i915.enable_psr) {
1721		DRM_DEBUG_KMS("PSR disable by flag\n");
1722		return false;
1723	}
1724
1725	crtc = dig_port->base.base.crtc;
1726	if (crtc == NULL) {
1727		DRM_DEBUG_KMS("crtc not active for PSR\n");
1728		return false;
1729	}
1730
1731	intel_crtc = to_intel_crtc(crtc);
1732	if (!intel_crtc_active(crtc)) {
1733		DRM_DEBUG_KMS("crtc not active for PSR\n");
1734		return false;
1735	}
1736
1737	obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1738	if (obj->tiling_mode != I915_TILING_X ||
1739	    obj->fence_reg == I915_FENCE_REG_NONE) {
1740		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1741		return false;
1742	}
1743
1744	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1745		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1746		return false;
1747	}
1748
1749	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1750	    S3D_ENABLE) {
1751		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1752		return false;
1753	}
1754
1755	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1756		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1757		return false;
1758	}
1759
1760	dev_priv->psr.source_ok = true;
1761	return true;
1762}
1763
/*
 * Enable PSR end to end if all conditions hold and it is not already
 * on: one-time setup, then sink-side enable, then source-side enable.
 */
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (intel_edp_psr_match_conditions(intel_dp) &&
	    !intel_edp_is_psr_enabled(dev)) {
		/* One-time setup (VSC packet + debug masks). */
		intel_edp_psr_setup(intel_dp);

		/* Enable PSR on the panel, then on the host. */
		intel_edp_psr_enable_sink(intel_dp);
		intel_edp_psr_enable_source(intel_dp);
	}
}
1781
/*
 * Public PSR enable entry point: bail out early if the conditions are
 * not met or PSR is already running, otherwise enable it.
 */
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;

	if (intel_edp_is_psr_enabled(dev))
		return;

	intel_edp_psr_do_enable(intel_dp);
}
1790
/*
 * Disable PSR on the source and wait (up to 2s) for the hardware state
 * machine to report idle.  No-op if PSR is not currently enabled.
 */
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_edp_is_psr_enabled(dev))
		return;

	I915_WRITE(EDP_PSR_CTL(dev),
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

	/* Wait till PSR is idle */
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
1807
/*
 * Re-evaluate the PSR conditions for the eDP encoder and enable or
 * disable PSR accordingly.  Bails out entirely if the sink never
 * advertised PSR support (is_edp_psr() does not depend on the encoder,
 * so the early return is equivalent to skipping all encoders).
 */
void intel_edp_psr_update(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
		if (encoder->type == INTEL_OUTPUT_EDP) {
			intel_dp = enc_to_intel_dp(&encoder->base);

			if (!is_edp_psr(dev))
				return;

			if (!intel_edp_psr_match_conditions(intel_dp))
				intel_edp_psr_disable(intel_dp);
			else
				if (!intel_edp_is_psr_enabled(dev))
					intel_edp_psr_do_enable(intel_dp);
		}
}
1827
/*
 * Common DP disable: power down backlight, sink and panel (holding VDD
 * across the panel-off), then take the link down where that must happen
 * before the pipe is disabled.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the cpu pipe/plane is
	 * disabled, so defer the link-down for it (handled in
	 * g4x_post_disable_dp()/vlv_post_disable_dp()). */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
1845
1846static void g4x_post_disable_dp(struct intel_encoder *encoder)
1847{
1848	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1849	enum port port = dp_to_dig_port(intel_dp)->port;
1850
1851	if (port != PORT_A)
1852		return;
1853
1854	intel_dp_link_down(intel_dp);
1855	ironlake_edp_pll_off(intel_dp);
1856}
1857
/* VLV post-pipe-disable hook: link-down is deferred here for all ports. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
1864
/*
 * Common DP enable: wake the sink, train the link and power the panel.
 * VDD is held across link training so AUX works before panel power is
 * up; it is released (sync) once the panel itself is on.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* Port must still be disabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
}
1883
/* g4x enable hook: bring up the port/link, then the backlight. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
1891
/* VLV enable hook: port enable happens in the pre_enable hook, so only
 * the backlight remains to be switched on here. */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
}
1898
1899static void g4x_pre_enable_dp(struct intel_encoder *encoder)
1900{
1901	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1902	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1903
1904	if (dport->port == PORT_A)
1905		ironlake_edp_pll_on(intel_dp);
1906}
1907
/*
 * VLV pre-enable hook: program the DPIO PHY lane registers for this
 * pipe/channel, initialize the panel power sequencer for eDP, enable
 * the port and wait for the PHY to report ready.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	struct edp_power_seq power_seq;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* NOTE(review): the value read here is discarded by the `val = 0`
	 * on the next line; the read may only matter for its side effect
	 * (or be a leftover) — confirm against the VLV DPIO docs. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	if (is_edp(intel_dp)) {
		/* init power sequencer on this pipe and port */
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);
	}

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}
1946
/*
 * VLV pre-PLL-enable hook: put the DPIO Tx lanes into their default
 * reset/clock configuration and apply the inter-pair skew workaround,
 * before the display PLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
1974
/*
 * CHV pre-enable hook: program the per-lane DPIO latency/upar settings,
 * initialize the panel power sequencer for eDP, enable the port and
 * wait for the PHY to report ready.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq power_seq;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;

	/* Program Tx lane latency optimal setting*/
	mutex_lock(&dev_priv->dpio_lock);
	for (i = 0; i < 4; i++) {
		/* Set the latency optimal bit (lane 1 gets 0, others 0x6). */
		data = (i == 1) ? 0x0 : 0x6;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
				data << DPIO_FRC_LATENCY_SHFIT);

		/* Set the upar bit (lane 1 gets 0, others 1). */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* FIXME: Fix up value only after power analysis */

	mutex_unlock(&dev_priv->dpio_lock);

	if (is_edp(intel_dp)) {
		/* init power sequencer on this pipe and port */
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);
	}

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}
2018
2019/*
2020 * Native read with retry for link status and receiver capability reads for
2021 * cases where the sink may still be asleep.
2022 *
2023 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2024 * supposed to retry 3 times per the spec.
2025 */
2026static ssize_t
2027intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2028			void *buffer, size_t size)
2029{
2030	ssize_t ret;
2031	int i;
2032
2033	for (i = 0; i < 3; i++) {
2034		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2035		if (ret == size)
2036			return ret;
2037		msleep(1);
2038	}
2039
2040	return ret;
2041}
2042
2043/*
2044 * Fetch AUX CH registers 0x202 - 0x207 which contain
2045 * link status information
2046 */
2047static bool
2048intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2049{
2050	return intel_dp_dpcd_read_wake(&intel_dp->aux,
2051				       DP_LANE0_1_STATUS,
2052				       link_status,
2053				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2054}
2055
2056/*
2057 * These are source-specific values; current Intel hardware supports
2058 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
2059 */
2060
2061static uint8_t
2062intel_dp_voltage_max(struct intel_dp *intel_dp)
2063{
2064	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2065	enum port port = dp_to_dig_port(intel_dp)->port;
2066
2067	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
2068		return DP_TRAIN_VOLTAGE_SWING_1200;
2069	else if (IS_GEN7(dev) && port == PORT_A)
2070		return DP_TRAIN_VOLTAGE_SWING_800;
2071	else if (HAS_PCH_CPT(dev) && port != PORT_A)
2072		return DP_TRAIN_VOLTAGE_SWING_1200;
2073	else
2074		return DP_TRAIN_VOLTAGE_SWING_800;
2075}
2076
2077static uint8_t
2078intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2079{
2080	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2081	enum port port = dp_to_dig_port(intel_dp)->port;
2082
2083	if (IS_BROADWELL(dev)) {
2084		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2085		case DP_TRAIN_VOLTAGE_SWING_400:
2086		case DP_TRAIN_VOLTAGE_SWING_600:
2087			return DP_TRAIN_PRE_EMPHASIS_6;
2088		case DP_TRAIN_VOLTAGE_SWING_800:
2089			return DP_TRAIN_PRE_EMPHASIS_3_5;
2090		case DP_TRAIN_VOLTAGE_SWING_1200:
2091		default:
2092			return DP_TRAIN_PRE_EMPHASIS_0;
2093		}
2094	} else if (IS_HASWELL(dev)) {
2095		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2096		case DP_TRAIN_VOLTAGE_SWING_400:
2097			return DP_TRAIN_PRE_EMPHASIS_9_5;
2098		case DP_TRAIN_VOLTAGE_SWING_600:
2099			return DP_TRAIN_PRE_EMPHASIS_6;
2100		case DP_TRAIN_VOLTAGE_SWING_800:
2101			return DP_TRAIN_PRE_EMPHASIS_3_5;
2102		case DP_TRAIN_VOLTAGE_SWING_1200:
2103		default:
2104			return DP_TRAIN_PRE_EMPHASIS_0;
2105		}
2106	} else if (IS_VALLEYVIEW(dev)) {
2107		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2108		case DP_TRAIN_VOLTAGE_SWING_400:
2109			return DP_TRAIN_PRE_EMPHASIS_9_5;
2110		case DP_TRAIN_VOLTAGE_SWING_600:
2111			return DP_TRAIN_PRE_EMPHASIS_6;
2112		case DP_TRAIN_VOLTAGE_SWING_800:
2113			return DP_TRAIN_PRE_EMPHASIS_3_5;
2114		case DP_TRAIN_VOLTAGE_SWING_1200:
2115		default:
2116			return DP_TRAIN_PRE_EMPHASIS_0;
2117		}
2118	} else if (IS_GEN7(dev) && port == PORT_A) {
2119		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2120		case DP_TRAIN_VOLTAGE_SWING_400:
2121			return DP_TRAIN_PRE_EMPHASIS_6;
2122		case DP_TRAIN_VOLTAGE_SWING_600:
2123		case DP_TRAIN_VOLTAGE_SWING_800:
2124			return DP_TRAIN_PRE_EMPHASIS_3_5;
2125		default:
2126			return DP_TRAIN_PRE_EMPHASIS_0;
2127		}
2128	} else {
2129		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2130		case DP_TRAIN_VOLTAGE_SWING_400:
2131			return DP_TRAIN_PRE_EMPHASIS_6;
2132		case DP_TRAIN_VOLTAGE_SWING_600:
2133			return DP_TRAIN_PRE_EMPHASIS_6;
2134		case DP_TRAIN_VOLTAGE_SWING_800:
2135			return DP_TRAIN_PRE_EMPHASIS_3_5;
2136		case DP_TRAIN_VOLTAGE_SWING_1200:
2137		default:
2138			return DP_TRAIN_PRE_EMPHASIS_0;
2139		}
2140	}
2141}
2142
/*
 * Translate the first lane's requested vswing/pre-emphasis into VLV PHY
 * register values and program them via DPIO.  Returns 0 in all cases
 * (VLV has no bits to merge into the DP port register; the caller uses
 * mask = 0).  Unsupported swing/pre-emphasis combinations bail out early
 * without touching the hardware.
 *
 * The demph/uniqtranscale/preemph constants are presumably tuning values
 * from the platform spec — TODO confirm against BSpec before changing.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	/* All lanes were given the same settings, so lane 0 is enough */
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Program the PHY: TX_DW5 is written 0 first and 0x80000000 last,
	 * bracketing the other writes — keep this ordering intact.
	 */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2242
/*
 * Translate the first lane's requested vswing/pre-emphasis into CHV PHY
 * swing/de-emphasis values and program them via DPIO read-modify-write
 * sequences.  Returns 0 in all cases (no bits to merge into the DP port
 * register; the caller uses mask = 0).  Unsupported combinations bail
 * out early without touching the hardware.
 */
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val, tx_dw2;
	/* All lanes carry the same settings, so lane 0 is enough */
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->dpio_lock);

	/* Clear calc init */
	vlv_dpio_write(dev_priv, pipe, CHV_PCS_DW10(ch), 0);

	/* Program swing deemph */
	val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW4(ch));
	val &= ~DPIO_SWING_DEEMPH9P5_MASK;
	val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(ch), val);

	/* Program swing margin (tx_dw2 is kept for possible reuse below) */
	tx_dw2 = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch));
	tx_dw2 &= ~DPIO_SWING_MARGIN_MASK;
	tx_dw2 |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch), tx_dw2);

	/* Disable unique transition scale */
	val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
	val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);

	/* 1200mV at 0dB is the one combination needing the uniq scale */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPHASIS_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_1200)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
		val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);

		tx_dw2 |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch), tx_dw2);
	}

	/* Start swing calculation */
	vlv_dpio_write(dev_priv, pipe, CHV_PCS_DW10(ch),
		(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3));

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2378
2379static void
2380intel_get_adjust_train(struct intel_dp *intel_dp,
2381		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2382{
2383	uint8_t v = 0;
2384	uint8_t p = 0;
2385	int lane;
2386	uint8_t voltage_max;
2387	uint8_t preemph_max;
2388
2389	for (lane = 0; lane < intel_dp->lane_count; lane++) {
2390		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2391		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2392
2393		if (this_v > v)
2394			v = this_v;
2395		if (this_p > p)
2396			p = this_p;
2397	}
2398
2399	voltage_max = intel_dp_voltage_max(intel_dp);
2400	if (v >= voltage_max)
2401		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2402
2403	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2404	if (p >= preemph_max)
2405		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2406
2407	for (lane = 0; lane < 4; lane++)
2408		intel_dp->train_set[lane] = v | p;
2409}
2410
2411static uint32_t
2412intel_gen4_signal_levels(uint8_t train_set)
2413{
2414	uint32_t	signal_levels = 0;
2415
2416	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2417	case DP_TRAIN_VOLTAGE_SWING_400:
2418	default:
2419		signal_levels |= DP_VOLTAGE_0_4;
2420		break;
2421	case DP_TRAIN_VOLTAGE_SWING_600:
2422		signal_levels |= DP_VOLTAGE_0_6;
2423		break;
2424	case DP_TRAIN_VOLTAGE_SWING_800:
2425		signal_levels |= DP_VOLTAGE_0_8;
2426		break;
2427	case DP_TRAIN_VOLTAGE_SWING_1200:
2428		signal_levels |= DP_VOLTAGE_1_2;
2429		break;
2430	}
2431	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2432	case DP_TRAIN_PRE_EMPHASIS_0:
2433	default:
2434		signal_levels |= DP_PRE_EMPHASIS_0;
2435		break;
2436	case DP_TRAIN_PRE_EMPHASIS_3_5:
2437		signal_levels |= DP_PRE_EMPHASIS_3_5;
2438		break;
2439	case DP_TRAIN_PRE_EMPHASIS_6:
2440		signal_levels |= DP_PRE_EMPHASIS_6;
2441		break;
2442	case DP_TRAIN_PRE_EMPHASIS_9_5:
2443		signal_levels |= DP_PRE_EMPHASIS_9_5;
2444		break;
2445	}
2446	return signal_levels;
2447}
2448
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	/* Combined swing+pre-emphasis key selects the hardware level */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Fall back to the weakest drive for unknown combinations */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
2476
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	/* Combined swing+pre-emphasis key selects the hardware level */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		/*
		 * NOTE(review): the fallback is the 500mV level even though
		 * no 500mV case appears above — presumably intentional per
		 * the IVB register definitions; confirm before changing.
		 */
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
2507
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	/* Combined swing+pre-emphasis key selects the DDI buffer level */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		/* Fall back to the weakest drive for unknown combinations */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
2541
/* Broadwell's DP voltage swing and pre-emphasis control (DDI Sel0-Sel8) */
static uint32_t
intel_bdw_signal_levels(uint8_t train_set)
{
	/* Combined swing+pre-emphasis key selects the DDI buffer level */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_BDW;	/* Sel1 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_BDW;	/* Sel2 */

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_BDW;	/* Sel3 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_BDW;	/* Sel4 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_BDW;	/* Sel5 */

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_BDW;	/* Sel6 */
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_BDW;	/* Sel7 */

	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_1200MV_0DB_BDW;	/* Sel8 */

	default:
		/* Fall back to the weakest drive for unknown combinations */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	}
}
2576
/*
 * Properly updates "DP" with the correct signal levels for the current
 * training set.  On DDI/CPT platforms the levels live in the DP (or DDI
 * buffer) register bits selected by "mask"; on VLV/CHV they are programmed
 * directly into the PHY via sideband and mask is 0 (nothing merged here).
 *
 * NOTE(review): the platform check order matters — CHV must be tested
 * before VLV (CHV presumably also satisfies IS_VALLEYVIEW; confirm).
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	/* All lanes carry the same settings, so lane 0 is enough */
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_BROADWELL(dev)) {
		signal_levels = intel_bdw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_HASWELL(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	/* Replace only the level bits; leave the rest of *DP untouched */
	*DP = (*DP & ~mask) | signal_levels;
}
2614
/*
 * Program the requested training pattern on the source (via DP_TP_CTL on
 * DDI, or the port register's link-train bits on CPT/legacy) and mirror
 * it to the sink's DP_TRAINING_PATTERN_SET DPCD register, followed by the
 * per-lane drive settings unless training is being disabled.
 *
 * Returns true when the DPCD write transferred the expected length.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	/* Pattern byte followed by up to lane_count drive-setting bytes */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	if (HAS_DDI(dev)) {
		/* DDI: training pattern lives in DP_TP_CTL, not *DP */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT: pattern selected in the port register with _CPT bits */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* TPS3 unsupported here: fall back to pattern 2 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy: pattern selected in the port register */
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* TPS3 unsupported here: fall back to pattern 2 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
2711
2712static bool
2713intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2714			uint8_t dp_train_pat)
2715{
2716	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2717	intel_dp_set_signal_levels(intel_dp, DP);
2718	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2719}
2720
2721static bool
2722intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2723			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
2724{
2725	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2726	struct drm_device *dev = intel_dig_port->base.base.dev;
2727	struct drm_i915_private *dev_priv = dev->dev_private;
2728	int ret;
2729
2730	intel_get_adjust_train(intel_dp, link_status);
2731	intel_dp_set_signal_levels(intel_dp, DP);
2732
2733	I915_WRITE(intel_dp->output_reg, *DP);
2734	POSTING_READ(intel_dp->output_reg);
2735
2736	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
2737				intel_dp->train_set, intel_dp->lane_count);
2738
2739	return ret == intel_dp->lane_count;
2740}
2741
/*
 * Switch a DDI port to idle-pattern transmission after training and wait
 * for the hardware to confirm (except on port A — see below).  No-op on
 * non-DDI platforms, which have no idle-pattern state.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Select idle transmission mode in DP_TP_CTL */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* Other ports: poll DP_TP_STATUS for idle-done (1ms budget) */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
2772
/*
 * Enable the corresponding port and run the clock-recovery phase of link
 * training (training pattern 1): write the link configuration to the
 * sink, then iterate reading link status and applying the sink's
 * requested vswing/pre-emphasis adjustments until clock recovery is
 * reported OK, the retry budget runs out, or an AUX read fails.
 * The final port register value is cached in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* No downspread, ANSI 8b/10b coding */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery: start with pattern 1, scrambling disabled */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/* 0xff never matches a real swing value, forcing a fresh count */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from scratch (up to 5x) */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Remember the programmed port value for the channel-eq phase */
	intel_dp->DP = DP;
}
2863
/*
 * Run the channel-equalization phase of link training (pattern 2, or
 * pattern 3 for HBR2/TPS3-capable sinks).  Loops reading link status:
 * if clock recovery regresses, redo the full clock-recovery phase; if
 * equalization fails 5 times in a row, drop the link and retrain; give
 * up entirely after 5 clock-recovery restarts.  On exit the port is
 * switched to idle-pattern transmission.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Hard cap on clock-recovery restarts */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2944
/*
 * End link training: switch both source and sink back to normal
 * (non-training) operation using the cached port register value.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
2950
/*
 * Turn the DP port off. The exact register write sequence matters: the
 * port must first be put in the idle training pattern, the IBX transcoder
 * select workaround must run while the port is still enabled, and only
 * then is DP_PORT_EN cleared.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the port into the idle training pattern before disabling it. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Finally kill audio and the port itself, then honor the panel
	 * power-down delay before anyone touches the panel again. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
3027
3028static bool
3029intel_dp_get_dpcd(struct intel_dp *intel_dp)
3030{
3031	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3032	struct drm_device *dev = dig_port->base.base.dev;
3033	struct drm_i915_private *dev_priv = dev->dev_private;
3034
3035	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
3036
3037	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3038				    sizeof(intel_dp->dpcd)) < 0)
3039		return false; /* aux transfer failed */
3040
3041	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3042			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
3043	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
3044
3045	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3046		return false; /* DPCD not present */
3047
3048	/* Check if the panel supports PSR */
3049	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3050	if (is_edp(intel_dp)) {
3051		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3052					intel_dp->psr_dpcd,
3053					sizeof(intel_dp->psr_dpcd));
3054		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3055			dev_priv->psr.sink_support = true;
3056			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3057		}
3058	}
3059
3060	/* Training Pattern 3 support */
3061	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3062	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3063		intel_dp->use_tps3 = true;
3064		DRM_DEBUG_KMS("Displayport TPS3 supported");
3065	} else
3066		intel_dp->use_tps3 = false;
3067
3068	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3069	      DP_DWN_STRM_PORT_PRESENT))
3070		return true; /* native DP sink */
3071
3072	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3073		return true; /* no per-port downstream info */
3074
3075	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3076				    intel_dp->downstream_ports,
3077				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3078		return false; /* downstream port status fetch failed */
3079
3080	return true;
3081}
3082
3083static void
3084intel_dp_probe_oui(struct intel_dp *intel_dp)
3085{
3086	u8 buf[3];
3087
3088	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3089		return;
3090
3091	intel_edp_panel_vdd_on(intel_dp);
3092
3093	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3094		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3095			      buf[0], buf[1], buf[2]);
3096
3097	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3098		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3099			      buf[0], buf[1], buf[2]);
3100
3101	edp_panel_vdd_off(intel_dp, false);
3102}
3103
/*
 * Ask the sink to compute a frame CRC and read it back into @crc
 * (6 bytes: R/Cr, G/Y, B/Cb).
 *
 * Returns 0 on success, -ENOTTY if the sink doesn't support test CRCs,
 * -EAGAIN on AUX failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf[1];

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
		return -EAGAIN;

	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Kick off CRC calculation in the sink. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       DP_TEST_SINK_START) < 0)
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EAGAIN;

	/* Stop the sink-side CRC engine again (best effort). */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
	return 0;
}
3132
3133static bool
3134intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3135{
3136	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3137				       DP_DEVICE_SERVICE_IRQ_VECTOR,
3138				       sink_irq_vector, 1) == 1;
3139}
3140
/*
 * Respond to a sink automated-test request. We implement no tests, so
 * unconditionally NAK the request.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3147
3148/*
3149 * According to DP spec
3150 * 5.1.2:
3151 *  1. Read DPCD
3152 *  2. Configure link according to Receiver Capabilities
3153 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3154 *  4. Check link status on receipt of hot-plug interrupt
3155 */
3156
3157void
3158intel_dp_check_link_status(struct intel_dp *intel_dp)
3159{
3160	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3161	u8 sink_irq_vector;
3162	u8 link_status[DP_LINK_STATUS_SIZE];
3163
3164	if (!intel_encoder->connectors_active)
3165		return;
3166
3167	if (WARN_ON(!intel_encoder->base.crtc))
3168		return;
3169
3170	/* Try to read receiver status if the link appears to be up */
3171	if (!intel_dp_get_link_status(intel_dp, link_status)) {
3172		return;
3173	}
3174
3175	/* Now read the DPCD to see if it's actually running */
3176	if (!intel_dp_get_dpcd(intel_dp)) {
3177		return;
3178	}
3179
3180	/* Try to read the source of the interrupt */
3181	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3182	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3183		/* Clear interrupt source */
3184		drm_dp_dpcd_writeb(&intel_dp->aux,
3185				   DP_DEVICE_SERVICE_IRQ_VECTOR,
3186				   sink_irq_vector);
3187
3188		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3189			intel_dp_handle_test_request(intel_dp);
3190		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3191			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3192	}
3193
3194	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3195		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3196			      drm_get_encoder_name(&intel_encoder->base));
3197		intel_dp_start_link_train(intel_dp);
3198		intel_dp_complete_link_train(intel_dp);
3199		intel_dp_stop_link_train(intel_dp);
3200	}
3201}
3202
3203/* XXX this is probably wrong for multiple downstream ports */
3204static enum drm_connector_status
3205intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3206{
3207	uint8_t *dpcd = intel_dp->dpcd;
3208	uint8_t type;
3209
3210	if (!intel_dp_get_dpcd(intel_dp))
3211		return connector_status_disconnected;
3212
3213	/* if there's no downstream port, we're done */
3214	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3215		return connector_status_connected;
3216
3217	/* If we're HPD-aware, SINK_COUNT changes dynamically */
3218	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3219	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3220		uint8_t reg;
3221
3222		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
3223					    &reg, 1) < 0)
3224			return connector_status_unknown;
3225
3226		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
3227					      : connector_status_disconnected;
3228	}
3229
3230	/* If no HPD, poke DDC gently */
3231	if (drm_probe_ddc(&intel_dp->aux.ddc))
3232		return connector_status_connected;
3233
3234	/* Well we tried, say unknown for unreliable port types */
3235	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3236		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3237		if (type == DP_DS_PORT_TYPE_VGA ||
3238		    type == DP_DS_PORT_TYPE_NON_EDID)
3239			return connector_status_unknown;
3240	} else {
3241		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3242			DP_DWN_STRM_PORT_TYPE_MASK;
3243		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3244		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
3245			return connector_status_unknown;
3246	}
3247
3248	/* Anything else is out of spec, warn and ignore */
3249	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3250	return connector_status_disconnected;
3251}
3252
3253static enum drm_connector_status
3254ironlake_dp_detect(struct intel_dp *intel_dp)
3255{
3256	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3257	struct drm_i915_private *dev_priv = dev->dev_private;
3258	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3259	enum drm_connector_status status;
3260
3261	/* Can't disconnect eDP, but you can close the lid... */
3262	if (is_edp(intel_dp)) {
3263		status = intel_panel_detect(dev);
3264		if (status == connector_status_unknown)
3265			status = connector_status_connected;
3266		return status;
3267	}
3268
3269	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3270		return connector_status_disconnected;
3271
3272	return intel_dp_detect_dpcd(intel_dp);
3273}
3274
3275static enum drm_connector_status
3276g4x_dp_detect(struct intel_dp *intel_dp)
3277{
3278	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3279	struct drm_i915_private *dev_priv = dev->dev_private;
3280	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3281	uint32_t bit;
3282
3283	/* Can't disconnect eDP, but you can close the lid... */
3284	if (is_edp(intel_dp)) {
3285		enum drm_connector_status status;
3286
3287		status = intel_panel_detect(dev);
3288		if (status == connector_status_unknown)
3289			status = connector_status_connected;
3290		return status;
3291	}
3292
3293	if (IS_VALLEYVIEW(dev)) {
3294		switch (intel_dig_port->port) {
3295		case PORT_B:
3296			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3297			break;
3298		case PORT_C:
3299			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3300			break;
3301		case PORT_D:
3302			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3303			break;
3304		default:
3305			return connector_status_unknown;
3306		}
3307	} else {
3308		switch (intel_dig_port->port) {
3309		case PORT_B:
3310			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3311			break;
3312		case PORT_C:
3313			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3314			break;
3315		case PORT_D:
3316			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3317			break;
3318		default:
3319			return connector_status_unknown;
3320		}
3321	}
3322
3323	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3324		return connector_status_disconnected;
3325
3326	return intel_dp_detect_dpcd(intel_dp);
3327}
3328
3329static struct edid *
3330intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3331{
3332	struct intel_connector *intel_connector = to_intel_connector(connector);
3333
3334	/* use cached edid if we have one */
3335	if (intel_connector->edid) {
3336		/* invalid edid */
3337		if (IS_ERR(intel_connector->edid))
3338			return NULL;
3339
3340		return drm_edid_duplicate(intel_connector->edid);
3341	}
3342
3343	return drm_get_edid(connector, adapter);
3344}
3345
3346static int
3347intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3348{
3349	struct intel_connector *intel_connector = to_intel_connector(connector);
3350
3351	/* use cached edid if we have one */
3352	if (intel_connector->edid) {
3353		/* invalid edid */
3354		if (IS_ERR(intel_connector->edid))
3355			return 0;
3356
3357		return intel_connector_update_modes(connector,
3358						    intel_connector->edid);
3359	}
3360
3361	return intel_ddc_get_modes(connector, adapter);
3362}
3363
/*
 * drm_connector_funcs.detect for DP: probe for a connected sink, probe
 * its OUI and audio capability, and report connector status.
 *
 * Note the bracketing: runtime PM and the port power domain are taken
 * before any hardware access and released on every path via 'out'.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	struct edid *edid = NULL;

	intel_runtime_pm_get(dev_priv);

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector));

	intel_dp->has_audio = false;

	/* Platform-specific live-status/DPCD probe. */
	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	/* Audio: honor a forced setting, otherwise consult the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_display_power_put(dev_priv, power_domain);

	intel_runtime_pm_put(dev_priv);

	return status;
}
3417
3418static int intel_dp_get_modes(struct drm_connector *connector)
3419{
3420	struct intel_dp *intel_dp = intel_attached_dp(connector);
3421	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3422	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3423	struct intel_connector *intel_connector = to_intel_connector(connector);
3424	struct drm_device *dev = connector->dev;
3425	struct drm_i915_private *dev_priv = dev->dev_private;
3426	enum intel_display_power_domain power_domain;
3427	int ret;
3428
3429	/* We should parse the EDID data and find out if it has an audio sink
3430	 */
3431
3432	power_domain = intel_display_port_power_domain(intel_encoder);
3433	intel_display_power_get(dev_priv, power_domain);
3434
3435	ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
3436	intel_display_power_put(dev_priv, power_domain);
3437	if (ret)
3438		return ret;
3439
3440	/* if eDP has no EDID, fall back to fixed mode */
3441	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3442		struct drm_display_mode *mode;
3443		mode = drm_mode_duplicate(dev,
3444					  intel_connector->panel.fixed_mode);
3445		if (mode) {
3446			drm_mode_probed_add(connector, mode);
3447			return 1;
3448		}
3449	}
3450	return 0;
3451}
3452
3453static bool
3454intel_dp_detect_audio(struct drm_connector *connector)
3455{
3456	struct intel_dp *intel_dp = intel_attached_dp(connector);
3457	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3458	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3459	struct drm_device *dev = connector->dev;
3460	struct drm_i915_private *dev_priv = dev->dev_private;
3461	enum intel_display_power_domain power_domain;
3462	struct edid *edid;
3463	bool has_audio = false;
3464
3465	power_domain = intel_display_port_power_domain(intel_encoder);
3466	intel_display_power_get(dev_priv, power_domain);
3467
3468	edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3469	if (edid) {
3470		has_audio = drm_detect_monitor_audio(edid);
3471		kfree(edid);
3472	}
3473
3474	intel_display_power_put(dev_priv, power_domain);
3475
3476	return has_audio;
3477}
3478
/*
 * drm set_property hook for DP connectors. Handles three properties:
 * force-audio, broadcast RGB range, and (eDP only) panel scaling mode.
 * Any change that affects the output reaches the 'done' path, which
 * forces a modeset on the attached crtc so the new value takes effect.
 *
 * Returns 0 on success or if nothing changed, -EINVAL for unknown
 * properties or invalid values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op if the forced setting is unchanged. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if the effective range is unchanged. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new setting by redoing the modeset on the live crtc. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
3566
/*
 * Connector destructor: free the cached EDID (which may be a real
 * allocation or an IS_ERR marker), tear down the eDP panel state, and
 * release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* Guard against the IS_ERR "invalid EDID" marker, not just NULL. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
3583
/*
 * Encoder destructor: unregister the AUX i2c bus, then for eDP cancel
 * the delayed VDD-off work *before* forcing VDD off under the
 * mode_config mutex, and finally free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* Stop the pending work first so it can't race the sync off. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		edp_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}
3600
/* drm connector ops for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};
3608
/* drm connector helper ops: mode probing/validation for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
3614
/* drm encoder ops: only destruction is needed here. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
3618
/* Hot-plug callback: re-check link status on a hotplug event. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}
3626
3627/* Return which DP Port should be selected for Transcoder DP control */
3628int
3629intel_trans_dp_port_sel(struct drm_crtc *crtc)
3630{
3631	struct drm_device *dev = crtc->dev;
3632	struct intel_encoder *intel_encoder;
3633	struct intel_dp *intel_dp;
3634
3635	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3636		intel_dp = enc_to_intel_dp(&intel_encoder->base);
3637
3638		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3639		    intel_encoder->type == INTEL_OUTPUT_EDP)
3640			return intel_dp->output_reg;
3641	}
3642
3643	return -1;
3644}
3645
3646/* check the VBT to see whether the eDP is on DP-D port */
3647bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3648{
3649	struct drm_i915_private *dev_priv = dev->dev_private;
3650	union child_device_config *p_child;
3651	int i;
3652	static const short port_mapping[] = {
3653		[PORT_B] = PORT_IDPB,
3654		[PORT_C] = PORT_IDPC,
3655		[PORT_D] = PORT_IDPD,
3656	};
3657
3658	if (port == PORT_A)
3659		return true;
3660
3661	if (!dev_priv->vbt.child_dev_num)
3662		return false;
3663
3664	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3665		p_child = dev_priv->vbt.child_dev + i;
3666
3667		if (p_child->common.dvo_port == port_mapping[port] &&
3668		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3669		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3670			return true;
3671	}
3672	return false;
3673}
3674
3675static void
3676intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3677{
3678	struct intel_connector *intel_connector = to_intel_connector(connector);
3679
3680	intel_attach_force_audio_property(connector);
3681	intel_attach_broadcast_rgb_property(connector);
3682	intel_dp->color_range_auto = true;
3683
3684	if (is_edp(intel_dp)) {
3685		drm_mode_create_scaling_mode_property(connector->dev);
3686		drm_object_attach_property(
3687			&connector->base,
3688			connector->dev->mode_config.scaling_mode_property,
3689			DRM_MODE_SCALE_ASPECT);
3690		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
3691	}
3692}
3693
/* Record "now" as the last power-cycle/power-on/backlight-off event. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
3700
/*
 * Work out the eDP panel power sequencing delays: read the current
 * hardware PPS registers, compare with the VBT values, take the max of
 * the two (falling back to the eDP spec limits when both are zero), and
 * store the results (converted from 100us units to ms) in intel_dp.
 * If @out is non-NULL the chosen raw values are returned through it.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	/* PPS register locations differ between PCH and VLV platforms. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hardware units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
3800
/*
 * Program the chosen panel power sequencing delays @seq into the PPS
 * hardware registers, including the reference-clock divisor and the
 * platform-specific panel port select bits.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	/* PPS register locations differ between PCH and VLV platforms. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
		else
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read back so the log shows what the hardware actually latched. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
3866
/*
 * Switch the eDP panel between its high and low (downclocked) refresh
 * rate for DRRS. @refresh_rate selects the target: matching the
 * downclock mode's vrefresh picks the low rate, anything else the high
 * rate. Bails out unless seamless DRRS is supported, the crtc is
 * active, and PSR isn't in the way.
 */
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;
	struct intel_crtc_config *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	struct intel_connector *intel_connector = dev_priv->drrs.connector;
	u32 reg, val;
	enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_connector == NULL) {
		DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
		return;
	}

	/* DRRS and PSR are mutually exclusive before gen8. */
	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
		return;
	}

	encoder = intel_attached_encoder(&intel_connector->base);
	intel_dp = enc_to_intel_dp(&encoder->base);
	intel_crtc = encoder->new_crtc;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = &intel_crtc->config;

	if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* The requested rate matching the downclock mode means low RR. */
	if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
		index = DRRS_LOW_RR;

	if (index == intel_dp->drrs_state.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen7 only: toggle the pipe's RR-mode-switch bit and M2/N2 values. */
	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
		reg = PIPECONF(intel_crtc->config.cpu_transcoder);
		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			val |= PIPECONF_EDP_RR_MODE_SWITCH;
			intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
		} else {
			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	/*
	 * mutex taken to ensure that there is no race between different
	 * drrs calls trying to update refresh rate. This scenario may occur
	 * in future when idleness detection based DRRS in kernel and
	 * possible calls from user space to set different RR are made.
	 */

	mutex_lock(&intel_dp->drrs_state.mutex);

	intel_dp->drrs_state.refresh_rate_type = index;

	mutex_unlock(&intel_dp->drrs_state.mutex);

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
3950
3951static struct drm_display_mode *
3952intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
3953			struct intel_connector *intel_connector,
3954			struct drm_display_mode *fixed_mode)
3955{
3956	struct drm_connector *connector = &intel_connector->base;
3957	struct intel_dp *intel_dp = &intel_dig_port->dp;
3958	struct drm_device *dev = intel_dig_port->base.base.dev;
3959	struct drm_i915_private *dev_priv = dev->dev_private;
3960	struct drm_display_mode *downclock_mode = NULL;
3961
3962	if (INTEL_INFO(dev)->gen <= 6) {
3963		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
3964		return NULL;
3965	}
3966
3967	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
3968		DRM_INFO("VBT doesn't support DRRS\n");
3969		return NULL;
3970	}
3971
3972	downclock_mode = intel_find_panel_downclock
3973					(dev, fixed_mode, connector);
3974
3975	if (!downclock_mode) {
3976		DRM_INFO("DRRS not supported\n");
3977		return NULL;
3978	}
3979
3980	dev_priv->drrs.connector = intel_connector;
3981
3982	mutex_init(&intel_dp->drrs_state.mutex);
3983
3984	intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
3985
3986	intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
3987	DRM_INFO("seamless DRRS supported for eDP panel.\n");
3988	return downclock_mode;
3989}
3990
/*
 * intel_edp_init_connector - eDP-specific connector initialization
 * @intel_dp: DP port state
 * @intel_connector: connector being set up
 * @power_seq: panel power sequencing delays to program into the hardware
 *
 * For eDP ports this probes the panel over AUX (DPCD), programs the panel
 * power sequencer registers, caches the EDID and picks the panel's fixed
 * mode (preferred EDID mode first, VBT mode as fallback), optionally
 * setting up a DRRS downclock mode.  For non-eDP ports it is a no-op.
 *
 * Returns false only when an eDP panel fails to report link info over AUX
 * (treated as a ghost device); true otherwise.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector,
				     struct edp_power_seq *power_seq)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	/* Assume no DRRS until intel_dp_drrs_init() proves otherwise. */
	intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;

	if (!is_edp(intel_dp))
		return true;

	/* The VDD bit needs a power domain reference, so if the bit is already
	 * enabled when we boot, grab this reference. */
	if (edp_have_panel_vdd(intel_dp)) {
		enum intel_display_power_domain power_domain;
		power_domain = intel_display_port_power_domain(intel_encoder);
		intel_display_power_get(dev_priv, power_domain);
	}

	/* Cache DPCD and EDID for edp. */
	intel_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		/* DPCD 1.1+: cache whether the sink supports link training
		 * without the AUX handshake. */
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* Unusable EDID: remember the failure as an ERR_PTR
			 * so later users don't treat it as valid data. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_dig_port,
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_panel_setup_backlight(connector);

	return true;
}
4079
/*
 * intel_dp_init_connector - initialize the connector for a DP/eDP port
 * @intel_dig_port: digital port this connector belongs to
 * @intel_connector: pre-allocated connector to set up
 *
 * Installs the platform-specific AUX vfuncs, registers the DRM connector
 * and its sysfs entry, wires up hotplug and hw-state callbacks, and for
 * eDP runs panel power sequencing and panel probing.
 *
 * Returns true on success.  Returns false when eDP panel probing fails,
 * after unwinding the AUX bus, VDD work, and connector registration.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	struct edp_power_seq power_seq = { 0 };
	int type;

	/* intel_dp vfuncs: pick the AUX clock divider for this platform. */
	if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Delayed work used by the panel VDD off path (cancelled on the
	 * failure path below). */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		/* DP is only wired to ports A-D here. */
		BUG();
	}

	if (is_edp(intel_dp)) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	intel_dp->psr_setup_done = false;

	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
		/* eDP panel probing failed: tear everything down again. */
		drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			mutex_lock(&dev->mode_config.mutex);
			edp_panel_vdd_off_sync(intel_dp);
			mutex_unlock(&dev->mode_config.mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
4197
4198void
4199intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4200{
4201	struct intel_digital_port *intel_dig_port;
4202	struct intel_encoder *intel_encoder;
4203	struct drm_encoder *encoder;
4204	struct intel_connector *intel_connector;
4205
4206	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
4207	if (!intel_dig_port)
4208		return;
4209
4210	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
4211	if (!intel_connector) {
4212		kfree(intel_dig_port);
4213		return;
4214	}
4215
4216	intel_encoder = &intel_dig_port->base;
4217	encoder = &intel_encoder->base;
4218
4219	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
4220			 DRM_MODE_ENCODER_TMDS);
4221
4222	intel_encoder->compute_config = intel_dp_compute_config;
4223	intel_encoder->mode_set = intel_dp_mode_set;
4224	intel_encoder->disable = intel_disable_dp;
4225	intel_encoder->get_hw_state = intel_dp_get_hw_state;
4226	intel_encoder->get_config = intel_dp_get_config;
4227	if (IS_CHERRYVIEW(dev)) {
4228		intel_encoder->pre_enable = chv_pre_enable_dp;
4229		intel_encoder->enable = vlv_enable_dp;
4230	} else if (IS_VALLEYVIEW(dev)) {
4231		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
4232		intel_encoder->pre_enable = vlv_pre_enable_dp;
4233		intel_encoder->enable = vlv_enable_dp;
4234		intel_encoder->post_disable = vlv_post_disable_dp;
4235	} else {
4236		intel_encoder->pre_enable = g4x_pre_enable_dp;
4237		intel_encoder->enable = g4x_enable_dp;
4238		intel_encoder->post_disable = g4x_post_disable_dp;
4239	}
4240
4241	intel_dig_port->port = port;
4242	intel_dig_port->dp.output_reg = output_reg;
4243
4244	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4245	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
4246	intel_encoder->cloneable = 0;
4247	intel_encoder->hot_plug = intel_dp_hot_plug;
4248
4249	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4250		drm_encoder_cleanup(encoder);
4251		kfree(intel_dig_port);
4252		kfree(intel_connector);
4253	}
4254}
4255