intel_dp.c revision 9f08ef59a6f71249de8b4e8a26c27075b9e99f9c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/export.h>
31#include <drm/drmP.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_edid.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38
39#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
40
/* Pairs a DP link-rate code (DP_LINK_BW_*) with the PLL divider values
 * that produce the matching port clock on a given platform.
 */
struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

/* DPLL dividers for gen4 (G4X) DP, one entry per supported link rate. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL dividers for PCH-split platforms (ILK/SNB/IVB). */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL dividers for Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
66
67/**
68 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
69 * @intel_dp: DP struct
70 *
71 * If a CPU or PCH DP output is attached to an eDP panel, this function
72 * will return true, and false otherwise.
73 */
74static bool is_edp(struct intel_dp *intel_dp)
75{
76	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
77
78	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
79}
80
81static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
82{
83	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
84
85	return intel_dig_port->base.base.dev;
86}
87
88static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
89{
90	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
91}
92
93static void intel_dp_link_down(struct intel_dp *intel_dp);
94
95static int
96intel_dp_max_link_bw(struct intel_dp *intel_dp)
97{
98	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
99
100	switch (max_link_bw) {
101	case DP_LINK_BW_1_62:
102	case DP_LINK_BW_2_7:
103		break;
104	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
105		max_link_bw = DP_LINK_BW_2_7;
106		break;
107	default:
108		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
109		     max_link_bw);
110		max_link_bw = DP_LINK_BW_1_62;
111		break;
112	}
113	return max_link_bw;
114}
115
116/*
117 * The units on the numbers in the next two are... bizarre.  Examples will
118 * make it clearer; this one parallels an example in the eDP spec.
119 *
120 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
121 *
122 *     270000 * 1 * 8 / 10 == 216000
123 *
124 * The actual data capacity of that configuration is 2.16Gbit/s, so the
125 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
126 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
127 * 119000.  At 18bpp that's 2142000 kilobits per second.
128 *
129 * Thus the strange-looking division by 10 in intel_dp_link_required, to
130 * get the result in decakilobits instead of kilobits.
131 */
132
/* Bandwidth needed by a mode, in decakilobits/s (see the comment above):
 * pixel_clock is in kHz, so divide the kilobit rate by 10, rounding up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
138
/* Usable data capacity of the link in decakilobits/s: 8b/10b channel
 * coding means only 80% of the raw symbol rate carries pixel data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw_rate = max_link_clock * max_lanes;

	return raw_rate * 8 / 10;
}
144
145static int
146intel_dp_mode_valid(struct drm_connector *connector,
147		    struct drm_display_mode *mode)
148{
149	struct intel_dp *intel_dp = intel_attached_dp(connector);
150	struct intel_connector *intel_connector = to_intel_connector(connector);
151	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
152	int target_clock = mode->clock;
153	int max_rate, mode_rate, max_lanes, max_link_clock;
154
155	if (is_edp(intel_dp) && fixed_mode) {
156		if (mode->hdisplay > fixed_mode->hdisplay)
157			return MODE_PANEL;
158
159		if (mode->vdisplay > fixed_mode->vdisplay)
160			return MODE_PANEL;
161
162		target_clock = fixed_mode->clock;
163	}
164
165	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
166	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
167
168	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
169	mode_rate = intel_dp_link_required(target_clock, 18);
170
171	if (mode_rate > max_rate)
172		return MODE_CLOCK_HIGH;
173
174	if (mode->clock < 10000)
175		return MODE_CLOCK_LOW;
176
177	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
178		return MODE_H_ILLEGAL;
179
180	return MODE_OK;
181}
182
/* Pack up to four bytes into one AUX data register value, MSB first
 * (byte 0 lands in bits 31:24).  Extra input bytes are ignored.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t value = 0;
	int shift = 24;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++, shift -= 8)
		value |= (uint32_t)src[i] << shift;

	return value;
}
195
/* Unpack an AUX data register into up to four bytes, MSB first
 * (bits 31:24 become dst[0]).  Never writes more than four bytes.
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int shift = 24;
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++, shift -= 8)
		dst[i] = (uint8_t)(src >> shift);
}
205
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	/* Decode the FSB frequency field of CLKCFG into hrawclk in MHz. */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: assume the common 533MHz FSB. */
		return 133;
	}
}
239
240static void
241intel_dp_init_panel_power_sequencer(struct drm_device *dev,
242				    struct intel_dp *intel_dp,
243				    struct edp_power_seq *out);
244static void
245intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
246					      struct intel_dp *intel_dp,
247					      struct edp_power_seq *out);
248
/*
 * Return the pipe whose panel power sequencer is wired to this DP port
 * on Valleyview.  After a modeset the attached crtc is authoritative;
 * at init time (no crtc yet) the PP_ON_DELAYS port-select fields are
 * scanned instead, defaulting to pipe A if nothing matches.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	enum pipe pipe;

	/* modeset should have pipe */
	if (crtc)
		return to_intel_crtc(crtc)->pipe;

	/* init time, try to find a pipe with this port selected */
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
			return pipe;
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
			return pipe;
	}

	/* shrug */
	return PIPE_A;
}
276
277static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
278{
279	struct drm_device *dev = intel_dp_to_dev(intel_dp);
280
281	if (HAS_PCH_SPLIT(dev))
282		return PCH_PP_CONTROL;
283	else
284		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
285}
286
287static u32 _pp_stat_reg(struct intel_dp *intel_dp)
288{
289	struct drm_device *dev = intel_dp_to_dev(intel_dp);
290
291	if (HAS_PCH_SPLIT(dev))
292		return PCH_PP_STATUS;
293	else
294		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
295}
296
297static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
298{
299	struct drm_device *dev = intel_dp_to_dev(intel_dp);
300	struct drm_i915_private *dev_priv = dev->dev_private;
301
302	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
303}
304
305static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
306{
307	struct drm_device *dev = intel_dp_to_dev(intel_dp);
308	struct drm_i915_private *dev_priv = dev->dev_private;
309
310	return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
311}
312
313static void
314intel_dp_check_edp(struct intel_dp *intel_dp)
315{
316	struct drm_device *dev = intel_dp_to_dev(intel_dp);
317	struct drm_i915_private *dev_priv = dev->dev_private;
318
319	if (!is_edp(intel_dp))
320		return;
321
322	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
323		WARN(1, "eDP powered off while attempting aux channel communication.\n");
324		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
325			      I915_READ(_pp_stat_reg(intel_dp)),
326			      I915_READ(_pp_ctrl_reg(intel_dp)));
327	}
328}
329
/*
 * Wait for the AUX transfer just kicked off to finish: sleep on the
 * gmbus wait queue when @has_aux_irq, otherwise poll atomically, with a
 * 10ms timeout either way.  Returns the final control-register value so
 * the caller can inspect the done/error bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the control register and tests the SEND_BUSY bit; it also
 * updates 'status' as a side effect, so 'status' is valid after the wait.
 */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
353
/*
 * Return the @index'th candidate AUX bit-clock divider for this port,
 * or 0 when the list is exhausted (the caller iterates until 0).  Most
 * platforms have a single correct divider; non-ULT HSW (LPT PCH) tries
 * two workaround values.  NOTE(review): branch order matters — PORT_A
 * is checked before the LPT workaround on purpose.
 */
static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
				      int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (IS_VALLEYVIEW(dev)) {
		return index ? 0 : 100;
	} else if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		if (HAS_DDI(dev))
			return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else if (HAS_PCH_SPLIT(dev)) {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	} else {
		return index ? 0 :intel_hrawclk(dev) / 2;
	}
}
392
/*
 * intel_dp_aux_ch - perform one raw AUX channel transaction
 * @intel_dp: port to talk over
 * @send: request bytes (header plus payload), at most 20
 * @send_bytes: number of valid bytes in @send
 * @recv: buffer for the reply
 * @recv_size: capacity of @recv, at most 20
 *
 * Loads the request into the AUX data registers, starts the transfer
 * and waits for completion, retrying over each candidate clock divider.
 * Returns the number of reply bytes written to @recv, or a negative
 * errno (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4; /* data registers follow the control reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, precharge, clock = 0;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the candidate clock dividers; the inner loop
	 * retries the transaction at the current divider.
	 */
	while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl,
				   DP_AUX_CH_CTL_SEND_BUSY |
				   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
				   DP_AUX_CH_CTL_TIME_OUT_400us |
				   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
				   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
				   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	return ret;
}
524
525/* Write data to the aux channel in native mode */
526static int
527intel_dp_aux_native_write(struct intel_dp *intel_dp,
528			  uint16_t address, uint8_t *send, int send_bytes)
529{
530	int ret;
531	uint8_t	msg[20];
532	int msg_bytes;
533	uint8_t	ack;
534
535	if (WARN_ON(send_bytes > 16))
536		return -E2BIG;
537
538	intel_dp_check_edp(intel_dp);
539	msg[0] = AUX_NATIVE_WRITE << 4;
540	msg[1] = address >> 8;
541	msg[2] = address & 0xff;
542	msg[3] = send_bytes - 1;
543	memcpy(&msg[4], send, send_bytes);
544	msg_bytes = send_bytes + 4;
545	for (;;) {
546		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
547		if (ret < 0)
548			return ret;
549		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
550			break;
551		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
552			udelay(100);
553		else
554			return -EIO;
555	}
556	return send_bytes;
557}
558
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	/* Convenience wrapper: same return convention as the bulk write. */
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}
566
567/* read bytes from a native aux channel */
568static int
569intel_dp_aux_native_read(struct intel_dp *intel_dp,
570			 uint16_t address, uint8_t *recv, int recv_bytes)
571{
572	uint8_t msg[4];
573	int msg_bytes;
574	uint8_t reply[20];
575	int reply_bytes;
576	uint8_t ack;
577	int ret;
578
579	if (WARN_ON(recv_bytes > 19))
580		return -E2BIG;
581
582	intel_dp_check_edp(intel_dp);
583	msg[0] = AUX_NATIVE_READ << 4;
584	msg[1] = address >> 8;
585	msg[2] = address & 0xff;
586	msg[3] = recv_bytes - 1;
587
588	msg_bytes = 4;
589	reply_bytes = recv_bytes + 1;
590
591	for (;;) {
592		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
593				      reply, reply_bytes);
594		if (ret == 0)
595			return -EPROTO;
596		if (ret < 0)
597			return ret;
598		ack = reply[0];
599		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
600			memcpy(recv, reply + 1, ret - 1);
601			return ret - 1;
602		}
603		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
604			udelay(100);
605		else
606			return -EIO;
607	}
608}
609
/*
 * I2C-over-AUX transfer callback for the i2c_algo_dp_aux adapter.
 * @mode: MODE_I2C_READ/WRITE/START/STOP flags from the i2c algo layer
 * @write_byte: byte to send for writes
 * @read_byte: out-param receiving the byte for reads
 *
 * Builds the I2C-over-AUX request, then retries up to 7 times on native
 * AUX_DEFER (per DP 1.2) and 100us-spaced retries on I2C defer.
 * Returns the payload byte count on success or a negative errno.
 * VDD is forced on for the duration of the transfer.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) keeps the I2C transfer open. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* Address-only (start/stop) transaction: header only. */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		/* Native ACK received; now decode the I2C-level reply. */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = reply_bytes - 1;
			goto out;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}
732
/*
 * Register the I2C-over-AUX adapter for this DP connector so EDID and
 * other DDC traffic can be routed through intel_dp_i2c_aux_ch().
 * Returns the i2c_dp_aux_add_bus() result (0 on success).
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	/* strncpy may not terminate; the explicit NUL below guarantees it. */
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	return ret;
}
755
756static void
757intel_dp_set_clock(struct intel_encoder *encoder,
758		   struct intel_crtc_config *pipe_config, int link_bw)
759{
760	struct drm_device *dev = encoder->base.dev;
761	const struct dp_link_dpll *divisor = NULL;
762	int i, count = 0;
763
764	if (IS_G4X(dev)) {
765		divisor = gen4_dpll;
766		count = ARRAY_SIZE(gen4_dpll);
767	} else if (IS_HASWELL(dev)) {
768		/* Haswell has special-purpose DP DDI clocks. */
769	} else if (HAS_PCH_SPLIT(dev)) {
770		divisor = pch_dpll;
771		count = ARRAY_SIZE(pch_dpll);
772	} else if (IS_VALLEYVIEW(dev)) {
773		divisor = vlv_dpll;
774		count = ARRAY_SIZE(vlv_dpll);
775	}
776
777	if (divisor && count) {
778		for (i = 0; i < count; i++) {
779			if (link_bw == divisor[i].link_bw) {
780				pipe_config->dpll = divisor[i].dpll;
781				pipe_config->clock_set = true;
782				break;
783			}
784		}
785	}
786}
787
/*
 * Compute the DP link configuration (bpp, lane count, link rate, m/n
 * values) for the requested mode.  Walks bpp from the pipe's value down
 * to 6bpc in 2bpc steps, and for each bpp tries every link-rate/lane
 * combination until the required bandwidth fits.  Returns false when no
 * combination can carry the mode (or it is double-clocked).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	/* eDP: replace the requested mode with the panel's fixed mode and
	 * set up the panel fitter to scale to it.
	 */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    dev_priv->vbt.edp_bpp < bpp) {
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
			      dev_priv->vbt.edp_bpp);
		bpp = dev_priv->vbt.edp_bpp;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = 0; clock <= max_clock; clock++) {
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
895
896static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
897{
898	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
899	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
900	struct drm_device *dev = crtc->base.dev;
901	struct drm_i915_private *dev_priv = dev->dev_private;
902	u32 dpa_ctl;
903
904	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
905	dpa_ctl = I915_READ(DP_A);
906	dpa_ctl &= ~DP_PLL_FREQ_MASK;
907
908	if (crtc->config.port_clock == 162000) {
909		/* For a long time we've carried around a ILK-DevA w/a for the
910		 * 160MHz clock. If we're really unlucky, it's still required.
911		 */
912		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
913		dpa_ctl |= DP_PLL_FREQ_160MHZ;
914		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
915	} else {
916		dpa_ctl |= DP_PLL_FREQ_270MHZ;
917		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
918	}
919
920	I915_WRITE(DP_A, dpa_ctl);
921
922	POSTING_READ(DP_A);
923	udelay(500);
924}
925
/*
 * Build the cached DP port register value (intel_dp->DP) for the current
 * mode: common voltage/lane/audio bits, then the per-generation sync,
 * training and pipe-select layout.  The value is written to hardware
 * later in the enable sequence; only the CPU eDP PLL is touched here.
 */
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style layout, pipe select in bits 30:29. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU / VLV layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		/* CPT PCH: most bits live in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}
1003
1004#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1005#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1006
1007#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1008#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1009
1010#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1011#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1012
/*
 * Poll the panel power status register until (status & @mask) == @value,
 * giving up with an error after 5 seconds (10ms poll interval).
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}
}
1035
/* Wait for the panel power sequencer to report the panel fully on. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1041
/* Wait for the panel power sequencer to report the panel fully off. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1047
/* Wait for any in-progress panel power cycle (including the mandatory
 * power-cycle delay) to complete before a new power-on is attempted. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1053
1054
1055/* Read the current pp_control value, unlocking the register if it
1056 * is locked
1057 */
1058
1059static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1060{
1061	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1062	struct drm_i915_private *dev_priv = dev->dev_private;
1063	u32 control;
1064
1065	control = I915_READ(_pp_ctrl_reg(intel_dp));
1066	control &= ~PANEL_UNLOCK_MASK;
1067	control |= PANEL_UNLOCK_REGS;
1068	return control;
1069}
1070
/*
 * Force panel VDD on so the AUX channel can be used before/without full
 * panel power. Records the request in want_panel_vdd; must be balanced
 * by a later ironlake_edp_panel_vdd_off(). No-op for non-eDP ports.
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	/* Unbalanced vdd_on calls indicate a driver bug. */
	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	/* VDD may still be forced on from a recent, not-yet-expired off. */
	if (ironlake_edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	/* Respect the panel's minimum off time before re-powering. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}
1112
/*
 * Actually drop the forced VDD, but only if no one still wants it and
 * the hardware still has it forced on. Caller must hold the mode_config
 * mutex (asserted below). Sleeps for the panel power-down delay.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
		msleep(intel_dp->panel_power_down_delay);
	}
}
1140
1141static void ironlake_panel_vdd_work(struct work_struct *__work)
1142{
1143	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1144						 struct intel_dp, panel_vdd_work);
1145	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1146
1147	mutex_lock(&dev->mode_config.mutex);
1148	ironlake_panel_vdd_off_sync(intel_dp);
1149	mutex_unlock(&dev->mode_config.mutex);
1150}
1151
1152void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1153{
1154	if (!is_edp(intel_dp))
1155		return;
1156
1157	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1158
1159	intel_dp->want_panel_vdd = false;
1160
1161	if (sync) {
1162		ironlake_panel_vdd_off_sync(intel_dp);
1163	} else {
1164		/*
1165		 * Queue the timer to fire a long
1166		 * time from now (relative to the power down delay)
1167		 * to keep the panel power up across a sequence of operations
1168		 */
1169		schedule_delayed_work(&intel_dp->panel_vdd_work,
1170				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1171	}
1172}
1173
/*
 * Turn eDP panel power on via the panel power sequencer and wait until
 * the sequencer reports the panel fully on. No-op for non-eDP ports or
 * if power is already up.
 */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	/* Honour the panel's minimum power-cycle time first. */
	ironlake_wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	/* Second half of the ILK workaround above. */
	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1217
/*
 * Turn eDP panel power off and wait for the sequencer to report the
 * panel fully off. Requires VDD to still be forced on (asserted below);
 * the want_panel_vdd flag is cleared because the power-off also drops
 * the forced VDD. No-op for non-eDP ports.
 */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* EDP_FORCE_VDD was just cleared above, so drop the bookkeeping too. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}
1246
/*
 * Enable the eDP backlight: wait the panel's backlight-on delay, set
 * EDP_BLC_ENABLE in the power sequencer, then enable the platform
 * backlight controller. No-op for non-eDP ports.
 */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(dev, pipe);
}
1277
/*
 * Disable the eDP backlight: platform backlight controller first, then
 * clear EDP_BLC_ENABLE, then wait the panel's backlight-off delay so
 * subsequent panel-off sequencing is safe. No-op for non-eDP ports.
 */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	msleep(intel_dp->backlight_off_delay);
}
1300
/*
 * Enable the CPU eDP PLL via DP_A. Must be called with the pipe
 * disabled (asserted) and with both the PLL and the port currently off
 * (warned). Waits 200us for the PLL to settle.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
1326
/*
 * Disable the CPU eDP PLL via DP_A. Must be called with the pipe
 * disabled (asserted), the PLL currently on and the port already off
 * (warned). Waits 200us after the write.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail). Modify the read-back value instead. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
1351
1352/* If the sink supports it, try to set the power state appropriately */
1353void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1354{
1355	int ret, i;
1356
1357	/* Should have a valid DPCD by this point */
1358	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1359		return;
1360
1361	if (mode != DRM_MODE_DPMS_ON) {
1362		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1363						  DP_SET_POWER_D3);
1364		if (ret != 1)
1365			DRM_DEBUG_DRIVER("failed to write sink power state\n");
1366	} else {
1367		/*
1368		 * When turning on, we need to retry for 1ms to give the sink
1369		 * time to wake up.
1370		 */
1371		for (i = 0; i < 3; i++) {
1372			ret = intel_dp_aux_native_write_1(intel_dp,
1373							  DP_SET_POWER,
1374							  DP_SET_POWER_D0);
1375			if (ret == 1)
1376				break;
1377			msleep(1);
1378		}
1379	}
1380}
1381
/*
 * Read back whether this DP encoder is enabled and, if so, which pipe
 * drives it. Returns false only when the port is disabled; on CPT the
 * pipe is found by scanning the transcoder port-select fields.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	/* IVB CPU eDP encodes the pipe in the CPT-style field. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: the port register has no pipe field; find which
		 * transcoder has this port selected instead. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		/* Port enabled but no transcoder claims it; *pipe is left
		 * untouched in this case. */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
1431
/*
 * Read the current hardware state into pipe_config: sync polarity
 * flags, link m/n values, port clock (CPU eDP only) and the derived
 * dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	/* Sync polarity lives in the port register, except on CPT PCH
	 * ports where it is in the transcoder DP control register. */
	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP: link rate comes from the DP_A PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;
}
1488
/* True if the attached sink advertised PSR support in its DPCD. */
static bool is_edp_psr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->psr.sink_support;
}
1495
1496static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1497{
1498	struct drm_i915_private *dev_priv = dev->dev_private;
1499
1500	if (!HAS_PSR(dev))
1501		return false;
1502
1503	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1504}
1505
/*
 * Program the PSR VSC SDP into the HSW video DIP data registers for the
 * crtc's transcoder, zero-padding the remainder of the DIP buffer, then
 * enable VSC DIP transmission.
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	/* Copy the packet a dword at a time; pad the rest with zeroes. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
1534
/*
 * One-time PSR setup: send the VSC SDP header and mask the memory-up
 * and hotplug events that would otherwise cause continuous PSR exits.
 * Guarded by psr_setup_done so repeated calls are cheap no-ops.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD);

	intel_dp->psr_setup_done = true;
}
1558
/*
 * Enable PSR on the sink via DPCD and program the source's dedicated
 * PSR AUX registers used for the hardware-generated wake-up writes.
 * Main-link-active is skipped when the sink needs no training on exit.
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
	int precharge = 0x3;
	int msg_size = 5;       /* Header(4) + Message(1) */

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE &
					    ~DP_PSR_MAIN_LINK_ACTIVE)*/;
	else
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE |
					    DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
1586
/*
 * Enable PSR on the source side: choose link-standby vs link-disable
 * based on the sink's no-train-on-exit capability, then write the PSR
 * control register with sleep/idle-frame timings and the enable bit.
 */
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	uint32_t idle_frames = 1;
	uint32_t val = 0x0;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		/* Sink can resume without retraining: keep the link in
		 * standby and take the fast-exit timings. */
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	I915_WRITE(EDP_PSR_CTL(dev), val |
		   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
1609
1610static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1611{
1612	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1613	struct drm_device *dev = dig_port->base.base.dev;
1614	struct drm_i915_private *dev_priv = dev->dev_private;
1615	struct drm_crtc *crtc = dig_port->base.base.crtc;
1616	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1617	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1618	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1619
1620	dev_priv->psr.source_ok = false;
1621
1622	if (!HAS_PSR(dev)) {
1623		DRM_DEBUG_KMS("PSR not supported on this platform\n");
1624		return false;
1625	}
1626
1627	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1628	    (dig_port->port != PORT_A)) {
1629		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1630		return false;
1631	}
1632
1633	if (!i915_enable_psr) {
1634		DRM_DEBUG_KMS("PSR disable by flag\n");
1635		return false;
1636	}
1637
1638	crtc = dig_port->base.base.crtc;
1639	if (crtc == NULL) {
1640		DRM_DEBUG_KMS("crtc not active for PSR\n");
1641		return false;
1642	}
1643
1644	intel_crtc = to_intel_crtc(crtc);
1645	if (!intel_crtc_active(crtc)) {
1646		DRM_DEBUG_KMS("crtc not active for PSR\n");
1647		return false;
1648	}
1649
1650	obj = to_intel_framebuffer(crtc->fb)->obj;
1651	if (obj->tiling_mode != I915_TILING_X ||
1652	    obj->fence_reg == I915_FENCE_REG_NONE) {
1653		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1654		return false;
1655	}
1656
1657	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1658		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1659		return false;
1660	}
1661
1662	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1663	    S3D_ENABLE) {
1664		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1665		return false;
1666	}
1667
1668	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1669		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1670		return false;
1671	}
1672
1673	dev_priv->psr.source_ok = true;
1674	return true;
1675}
1676
/*
 * Enable PSR if conditions allow and it is not already enabled:
 * one-time setup, then sink side, then source side.
 */
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;
	if (intel_edp_is_psr_enabled(dev))
		return;

	/* Setup PSR once */
	intel_edp_psr_setup(intel_dp);

	/* Panel side first, then the source side. */
	intel_edp_psr_enable_sink(intel_dp);
	intel_edp_psr_enable_source(intel_dp);
}
1694
/* Public entry point: enable PSR when conditions allow and it is not
 * already running. */
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;

	if (!intel_edp_is_psr_enabled(dev))
		intel_edp_psr_do_enable(intel_dp);
}
1703
/*
 * Disable PSR on the source and wait (up to 2s) for the PSR state
 * machine to return to idle. No-op if PSR is not currently enabled.
 */
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_edp_is_psr_enabled(dev))
		return;

	I915_WRITE(EDP_PSR_CTL(dev),
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

	/* Wait till PSR is idle */
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
1720
/*
 * Re-evaluate PSR for every eDP encoder: disable it when the conditions
 * no longer hold, (re-)enable it when they do and PSR is currently off.
 */
void intel_edp_psr_update(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
		if (encoder->type == INTEL_OUTPUT_EDP) {
			intel_dp = enc_to_intel_dp(&encoder->base);

			/* NOTE(review): sink PSR support is a per-device
			 * property here, so returning (rather than
			 * continuing) skips all remaining encoders —
			 * presumably intentional; confirm. */
			if (!is_edp_psr(dev))
				return;

			if (!intel_edp_psr_match_conditions(intel_dp))
				intel_edp_psr_disable(intel_dp);
			else
				if (!intel_edp_is_psr_enabled(dev))
					intel_edp_psr_do_enable(intel_dp);
		}
}
1740
/*
 * Encoder disable hook: turn off backlight and panel power (holding VDD
 * across the switch-off), and take the link down for PCH ports. CPU eDP
 * and VLV link-down is deferred to intel_post_disable_dp().
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	/* NOTE(review): the sink is set to D0 (on) here, not D3 —
	 * presumably to keep it responsive while the panel powers off;
	 * confirm against the power sequencing requirements. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
1758
1759static void intel_post_disable_dp(struct intel_encoder *encoder)
1760{
1761	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1762	enum port port = dp_to_dig_port(intel_dp)->port;
1763	struct drm_device *dev = encoder->base.dev;
1764
1765	if (port == PORT_A || IS_VALLEYVIEW(dev)) {
1766		intel_dp_link_down(intel_dp);
1767		if (!IS_VALLEYVIEW(dev))
1768			ironlake_edp_pll_off(intel_dp);
1769	}
1770}
1771
/*
 * Common DP enable sequence: wake the sink and train the link under
 * forced VDD, power the panel on, then finish training. Warns and bails
 * if the port is unexpectedly already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	/* The ordering below is required: AUX (sink wake + training start)
	 * needs VDD; panel power must be up before VDD is released. */
	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
}
1790
/* g4x/ILK enable hook: full DP enable followed by backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	ironlake_edp_backlight_on(intel_dp);
}
1798
/* VLV enable hook: only the backlight is handled here — the main DP
 * enable runs earlier, from vlv_pre_enable_dp(). */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	ironlake_edp_backlight_on(intel_dp);
}
1805
/* Pre-enable hook: CPU eDP (port A) needs its dedicated PLL running
 * before the port can be enabled. */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	if (dport->port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
1814
/*
 * VLV pre-enable hook: program the DPIO PHY data lanes for this pipe,
 * initialize the panel power sequencer, run the common DP enable and
 * wait for the port to become ready.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	int port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	struct edp_power_seq power_seq;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* NOTE(review): the read result is immediately discarded by the
	 * "val = 0" below — presumably the read itself is needed (or this
	 * is dead code); confirm before cleaning up. */
	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, port);
}
1851
/*
 * VLV pre-PLL-enable hook: put the PHY TX lanes through their default
 * reset/clock programming and apply the inter-pair skew fixup, all
 * under the DPIO lock. The magic values come from the VLV PHY recipe.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	int port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
1879
1880/*
1881 * Native read with retry for link status and receiver capability reads for
1882 * cases where the sink may still be asleep.
1883 */
1884static bool
1885intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1886			       uint8_t *recv, int recv_bytes)
1887{
1888	int ret, i;
1889
1890	/*
1891	 * Sinks are *supposed* to come up within 1ms from an off state,
1892	 * but we're also supposed to retry 3 times per the spec.
1893	 */
1894	for (i = 0; i < 3; i++) {
1895		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1896					       recv_bytes);
1897		if (ret == recv_bytes)
1898			return true;
1899		msleep(1);
1900	}
1901
1902	return false;
1903}
1904
1905/*
1906 * Fetch AUX CH registers 0x202 - 0x207 which contain
1907 * link status information
1908 */
1909static bool
1910intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1911{
1912	return intel_dp_aux_native_read_retry(intel_dp,
1913					      DP_LANE0_1_STATUS,
1914					      link_status,
1915					      DP_LINK_STATUS_SIZE);
1916}
1917
1918#if 0
1919static char	*voltage_names[] = {
1920	"0.4V", "0.6V", "0.8V", "1.2V"
1921};
1922static char	*pre_emph_names[] = {
1923	"0dB", "3.5dB", "6dB", "9.5dB"
1924};
1925static char	*link_train_names[] = {
1926	"pattern 1", "pattern 2", "idle", "off"
1927};
1928#endif
1929
1930/*
1931 * These are source-specific values; current Intel hardware supports
1932 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1933 */
1934
1935static uint8_t
1936intel_dp_voltage_max(struct intel_dp *intel_dp)
1937{
1938	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1939	enum port port = dp_to_dig_port(intel_dp)->port;
1940
1941	if (IS_VALLEYVIEW(dev))
1942		return DP_TRAIN_VOLTAGE_SWING_1200;
1943	else if (IS_GEN7(dev) && port == PORT_A)
1944		return DP_TRAIN_VOLTAGE_SWING_800;
1945	else if (HAS_PCH_CPT(dev) && port != PORT_A)
1946		return DP_TRAIN_VOLTAGE_SWING_1200;
1947	else
1948		return DP_TRAIN_VOLTAGE_SWING_800;
1949}
1950
1951static uint8_t
1952intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1953{
1954	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1955	enum port port = dp_to_dig_port(intel_dp)->port;
1956
1957	if (HAS_DDI(dev)) {
1958		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1959		case DP_TRAIN_VOLTAGE_SWING_400:
1960			return DP_TRAIN_PRE_EMPHASIS_9_5;
1961		case DP_TRAIN_VOLTAGE_SWING_600:
1962			return DP_TRAIN_PRE_EMPHASIS_6;
1963		case DP_TRAIN_VOLTAGE_SWING_800:
1964			return DP_TRAIN_PRE_EMPHASIS_3_5;
1965		case DP_TRAIN_VOLTAGE_SWING_1200:
1966		default:
1967			return DP_TRAIN_PRE_EMPHASIS_0;
1968		}
1969	} else if (IS_VALLEYVIEW(dev)) {
1970		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1971		case DP_TRAIN_VOLTAGE_SWING_400:
1972			return DP_TRAIN_PRE_EMPHASIS_9_5;
1973		case DP_TRAIN_VOLTAGE_SWING_600:
1974			return DP_TRAIN_PRE_EMPHASIS_6;
1975		case DP_TRAIN_VOLTAGE_SWING_800:
1976			return DP_TRAIN_PRE_EMPHASIS_3_5;
1977		case DP_TRAIN_VOLTAGE_SWING_1200:
1978		default:
1979			return DP_TRAIN_PRE_EMPHASIS_0;
1980		}
1981	} else if (IS_GEN7(dev) && port == PORT_A) {
1982		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1983		case DP_TRAIN_VOLTAGE_SWING_400:
1984			return DP_TRAIN_PRE_EMPHASIS_6;
1985		case DP_TRAIN_VOLTAGE_SWING_600:
1986		case DP_TRAIN_VOLTAGE_SWING_800:
1987			return DP_TRAIN_PRE_EMPHASIS_3_5;
1988		default:
1989			return DP_TRAIN_PRE_EMPHASIS_0;
1990		}
1991	} else {
1992		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1993		case DP_TRAIN_VOLTAGE_SWING_400:
1994			return DP_TRAIN_PRE_EMPHASIS_6;
1995		case DP_TRAIN_VOLTAGE_SWING_600:
1996			return DP_TRAIN_PRE_EMPHASIS_6;
1997		case DP_TRAIN_VOLTAGE_SWING_800:
1998			return DP_TRAIN_PRE_EMPHASIS_3_5;
1999		case DP_TRAIN_VOLTAGE_SWING_1200:
2000		default:
2001			return DP_TRAIN_PRE_EMPHASIS_0;
2002		}
2003	}
2004}
2005
/*
 * Program the VLV DPIO PHY with the swing/pre-emphasis selected in
 * intel_dp->train_set[0].
 *
 * The demph/preemph/uniqtranscale constants below are opaque PHY tuning
 * values; presumably they come from the platform's PHY programming guide —
 * do not derive or "simplify" them.  Returns 0 always (the caller on VLV
 * uses mask = 0, so no bits are folded into the DP register).
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	int port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Outer switch: pre-emphasis level; inner: voltage swing.  Any
	 * combination not listed is invalid per DP spec and bails out. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* DPIO write sequence is order-sensitive: OCALINIT is cleared first
	 * and set again last to latch the new swing values — keep ordering. */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2105
2106static void
2107intel_get_adjust_train(struct intel_dp *intel_dp,
2108		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2109{
2110	uint8_t v = 0;
2111	uint8_t p = 0;
2112	int lane;
2113	uint8_t voltage_max;
2114	uint8_t preemph_max;
2115
2116	for (lane = 0; lane < intel_dp->lane_count; lane++) {
2117		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2118		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2119
2120		if (this_v > v)
2121			v = this_v;
2122		if (this_p > p)
2123			p = this_p;
2124	}
2125
2126	voltage_max = intel_dp_voltage_max(intel_dp);
2127	if (v >= voltage_max)
2128		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2129
2130	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2131	if (p >= preemph_max)
2132		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2133
2134	for (lane = 0; lane < 4; lane++)
2135		intel_dp->train_set[lane] = v | p;
2136}
2137
2138static uint32_t
2139intel_gen4_signal_levels(uint8_t train_set)
2140{
2141	uint32_t	signal_levels = 0;
2142
2143	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2144	case DP_TRAIN_VOLTAGE_SWING_400:
2145	default:
2146		signal_levels |= DP_VOLTAGE_0_4;
2147		break;
2148	case DP_TRAIN_VOLTAGE_SWING_600:
2149		signal_levels |= DP_VOLTAGE_0_6;
2150		break;
2151	case DP_TRAIN_VOLTAGE_SWING_800:
2152		signal_levels |= DP_VOLTAGE_0_8;
2153		break;
2154	case DP_TRAIN_VOLTAGE_SWING_1200:
2155		signal_levels |= DP_VOLTAGE_1_2;
2156		break;
2157	}
2158	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2159	case DP_TRAIN_PRE_EMPHASIS_0:
2160	default:
2161		signal_levels |= DP_PRE_EMPHASIS_0;
2162		break;
2163	case DP_TRAIN_PRE_EMPHASIS_3_5:
2164		signal_levels |= DP_PRE_EMPHASIS_3_5;
2165		break;
2166	case DP_TRAIN_PRE_EMPHASIS_6:
2167		signal_levels |= DP_PRE_EMPHASIS_6;
2168		break;
2169	case DP_TRAIN_PRE_EMPHASIS_9_5:
2170		signal_levels |= DP_PRE_EMPHASIS_9_5;
2171		break;
2172	}
2173	return signal_levels;
2174}
2175
2176/* Gen6's DP voltage swing and pre-emphasis control */
2177static uint32_t
2178intel_gen6_edp_signal_levels(uint8_t train_set)
2179{
2180	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2181					 DP_TRAIN_PRE_EMPHASIS_MASK);
2182	switch (signal_levels) {
2183	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2184	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2185		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2186	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2187		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2188	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2189	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2190		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2191	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2192	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2193		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2194	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2195	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2196		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2197	default:
2198		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2199			      "0x%x\n", signal_levels);
2200		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2201	}
2202}
2203
2204/* Gen7's DP voltage swing and pre-emphasis control */
2205static uint32_t
2206intel_gen7_edp_signal_levels(uint8_t train_set)
2207{
2208	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2209					 DP_TRAIN_PRE_EMPHASIS_MASK);
2210	switch (signal_levels) {
2211	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2212		return EDP_LINK_TRAIN_400MV_0DB_IVB;
2213	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2214		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2215	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2216		return EDP_LINK_TRAIN_400MV_6DB_IVB;
2217
2218	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2219		return EDP_LINK_TRAIN_600MV_0DB_IVB;
2220	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2221		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2222
2223	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2224		return EDP_LINK_TRAIN_800MV_0DB_IVB;
2225	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2226		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2227
2228	default:
2229		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2230			      "0x%x\n", signal_levels);
2231		return EDP_LINK_TRAIN_500MV_0DB_IVB;
2232	}
2233}
2234
2235/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2236static uint32_t
2237intel_hsw_signal_levels(uint8_t train_set)
2238{
2239	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2240					 DP_TRAIN_PRE_EMPHASIS_MASK);
2241	switch (signal_levels) {
2242	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2243		return DDI_BUF_EMP_400MV_0DB_HSW;
2244	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2245		return DDI_BUF_EMP_400MV_3_5DB_HSW;
2246	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2247		return DDI_BUF_EMP_400MV_6DB_HSW;
2248	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2249		return DDI_BUF_EMP_400MV_9_5DB_HSW;
2250
2251	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2252		return DDI_BUF_EMP_600MV_0DB_HSW;
2253	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2254		return DDI_BUF_EMP_600MV_3_5DB_HSW;
2255	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2256		return DDI_BUF_EMP_600MV_6DB_HSW;
2257
2258	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2259		return DDI_BUF_EMP_800MV_0DB_HSW;
2260	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2261		return DDI_BUF_EMP_800MV_3_5DB_HSW;
2262	default:
2263		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2264			      "0x%x\n", signal_levels);
2265		return DDI_BUF_EMP_400MV_0DB_HSW;
2266	}
2267}
2268
2269/* Properly updates "DP" with the correct signal levels. */
2270static void
2271intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2272{
2273	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2274	enum port port = intel_dig_port->port;
2275	struct drm_device *dev = intel_dig_port->base.base.dev;
2276	uint32_t signal_levels, mask;
2277	uint8_t train_set = intel_dp->train_set[0];
2278
2279	if (HAS_DDI(dev)) {
2280		signal_levels = intel_hsw_signal_levels(train_set);
2281		mask = DDI_BUF_EMP_MASK;
2282	} else if (IS_VALLEYVIEW(dev)) {
2283		signal_levels = intel_vlv_signal_levels(intel_dp);
2284		mask = 0;
2285	} else if (IS_GEN7(dev) && port == PORT_A) {
2286		signal_levels = intel_gen7_edp_signal_levels(train_set);
2287		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
2288	} else if (IS_GEN6(dev) && port == PORT_A) {
2289		signal_levels = intel_gen6_edp_signal_levels(train_set);
2290		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2291	} else {
2292		signal_levels = intel_gen4_signal_levels(train_set);
2293		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2294	}
2295
2296	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2297
2298	*DP = (*DP & ~mask) | signal_levels;
2299}
2300
/*
 * Program the requested training pattern on both the source (port/DDI
 * registers) and the sink (DPCD DP_TRAINING_PATTERN_SET via AUX), then
 * write the per-lane drive settings unless the pattern is being disabled.
 *
 * Returns true when the AUX write transferred the expected number of
 * bytes.  The source-side register write is performed before the sink is
 * told about the pattern change.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	/* Pattern byte followed by up to lane_count drive-setting bytes. */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	if (HAS_DDI(dev)) {
		/* DDI platforms program the pattern via DP_TP_CTL. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT-style pattern bits in the port register. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 has no CPT encoding; degrade to PAT2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy (gen4-style) pattern bits in the port register. */
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 has no legacy encoding; degrade to PAT2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
					buf, len);

	return ret == len;
}
2397
/*
 * Restart link training from the lowest drive settings: zero the cached
 * train_set, reprogram the source's signal levels to match, then apply
 * the given training pattern on source and sink.  Returns the result of
 * intel_dp_set_link_train().
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
2406
2407static bool
2408intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2409			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
2410{
2411	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2412	struct drm_device *dev = intel_dig_port->base.base.dev;
2413	struct drm_i915_private *dev_priv = dev->dev_private;
2414	int ret;
2415
2416	intel_get_adjust_train(intel_dp, link_status);
2417	intel_dp_set_signal_levels(intel_dp, DP);
2418
2419	I915_WRITE(intel_dp->output_reg, *DP);
2420	POSTING_READ(intel_dp->output_reg);
2421
2422	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
2423					intel_dp->train_set,
2424					intel_dp->lane_count);
2425
2426	return ret == intel_dp->lane_count;
2427}
2428
/*
 * Switch a DDI port's transport to idle-pattern transmission after
 * training completes.  No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* Other ports: poll (up to 1 ms) for the idle-done status bit. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
2459
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training (DP spec 2.5.3.3/3.5.1.3):
 * write the link configuration to the sink, enable the port with
 * training pattern 1, then loop reading link status and adjusting drive
 * settings until all lanes report CR lock.  Gives up after 5 rounds at
 * max swing (with a full reset between rounds) or 5 tries at the same
 * voltage.  The final port register value is cached in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);

	/* No downspread, 8b/10b channel coding. */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;		/* sentinel: no voltage tried yet */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing: full restart from zero. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
2550
/*
 * Channel-equalization phase of DP link training: switch to training
 * pattern 2 and loop until all lanes report EQ done.  If clock recovery
 * is lost, restart the CR phase; after 5 EQ attempts, drop the link and
 * retrain from scratch.  At most 5 CR restarts before aborting.  Ends by
 * putting DDI ports into idle-pattern mode and caching the port value.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     DP_TRAINING_PATTERN_2 |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			/* CR lost: redo CR phase, then return to pattern 2. */
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						DP_TRAINING_PATTERN_2 |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						DP_TRAINING_PATTERN_2 |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2627
/*
 * End link training: program DP_TRAINING_PATTERN_DISABLE on source and
 * sink, switching the link to normal pixel transmission.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
2633
/*
 * Take the DP link down: idle the training pattern, apply the IBX
 * transcoder-B workaround if needed, then disable the port and wait out
 * the panel power-down delay.  The write/wait ordering below implements
 * hardware workarounds — do not reorder.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the link into idle-pattern mode before disabling. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Finally kill audio and the port enable bit itself. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2713
2714static bool
2715intel_dp_get_dpcd(struct intel_dp *intel_dp)
2716{
2717	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2718	struct drm_device *dev = dig_port->base.base.dev;
2719	struct drm_i915_private *dev_priv = dev->dev_private;
2720
2721	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2722
2723	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2724					   sizeof(intel_dp->dpcd)) == 0)
2725		return false; /* aux transfer failed */
2726
2727	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2728			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2729	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2730
2731	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2732		return false; /* DPCD not present */
2733
2734	/* Check if the panel supports PSR */
2735	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2736	if (is_edp(intel_dp)) {
2737		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2738					       intel_dp->psr_dpcd,
2739					       sizeof(intel_dp->psr_dpcd));
2740		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2741			dev_priv->psr.sink_support = true;
2742			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2743		}
2744	}
2745
2746	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2747	      DP_DWN_STRM_PORT_PRESENT))
2748		return true; /* native DP sink */
2749
2750	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2751		return true; /* no per-port downstream info */
2752
2753	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2754					   intel_dp->downstream_ports,
2755					   DP_MAX_DOWNSTREAM_PORTS) == 0)
2756		return false; /* downstream port status fetch failed */
2757
2758	return true;
2759}
2760
2761static void
2762intel_dp_probe_oui(struct intel_dp *intel_dp)
2763{
2764	u8 buf[3];
2765
2766	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2767		return;
2768
2769	ironlake_edp_panel_vdd_on(intel_dp);
2770
2771	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2772		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2773			      buf[0], buf[1], buf[2]);
2774
2775	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2776		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2777			      buf[0], buf[1], buf[2]);
2778
2779	ironlake_edp_panel_vdd_off(intel_dp, false);
2780}
2781
2782static bool
2783intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2784{
2785	int ret;
2786
2787	ret = intel_dp_aux_native_read_retry(intel_dp,
2788					     DP_DEVICE_SERVICE_IRQ_VECTOR,
2789					     sink_irq_vector, 1);
2790	if (!ret)
2791		return false;
2792
2793	return true;
2794}
2795
/*
 * Respond to a sink's automated-test request.  Automated test support is
 * not implemented, so every request is NAKed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2802
2803/*
2804 * According to DP spec
2805 * 5.1.2:
2806 *  1. Read DPCD
2807 *  2. Configure link according to Receiver Capabilities
2808 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
2809 *  4. Check link status on receipt of hot-plug interrupt
2810 */
2811
/*
 * Hot-plug-interrupt handler path (DP spec 5.1.2 step 4): verify the
 * link is still usable and retrain if channel EQ has been lost.  Takes
 * the link down if status or DPCD reads fail; services sink IRQs
 * (automated test requests are NAKed) along the way.  Only acts when the
 * encoder has active connectors and a crtc.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain the full CR + EQ sequence if equalization was lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
2859
2860/* XXX this is probably wrong for multiple downstream ports */
2861static enum drm_connector_status
2862intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2863{
2864	uint8_t *dpcd = intel_dp->dpcd;
2865	uint8_t type;
2866
2867	if (!intel_dp_get_dpcd(intel_dp))
2868		return connector_status_disconnected;
2869
2870	/* if there's no downstream port, we're done */
2871	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2872		return connector_status_connected;
2873
2874	/* If we're HPD-aware, SINK_COUNT changes dynamically */
2875	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2876	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
2877		uint8_t reg;
2878		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2879						    &reg, 1))
2880			return connector_status_unknown;
2881		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2882					      : connector_status_disconnected;
2883	}
2884
2885	/* If no HPD, poke DDC gently */
2886	if (drm_probe_ddc(&intel_dp->adapter))
2887		return connector_status_connected;
2888
2889	/* Well we tried, say unknown for unreliable port types */
2890	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
2891		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2892		if (type == DP_DS_PORT_TYPE_VGA ||
2893		    type == DP_DS_PORT_TYPE_NON_EDID)
2894			return connector_status_unknown;
2895	} else {
2896		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2897			DP_DWN_STRM_PORT_TYPE_MASK;
2898		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
2899		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
2900			return connector_status_unknown;
2901	}
2902
2903	/* Anything else is out of spec, warn and ignore */
2904	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2905	return connector_status_disconnected;
2906}
2907
2908static enum drm_connector_status
2909ironlake_dp_detect(struct intel_dp *intel_dp)
2910{
2911	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2912	struct drm_i915_private *dev_priv = dev->dev_private;
2913	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2914	enum drm_connector_status status;
2915
2916	/* Can't disconnect eDP, but you can close the lid... */
2917	if (is_edp(intel_dp)) {
2918		status = intel_panel_detect(dev);
2919		if (status == connector_status_unknown)
2920			status = connector_status_connected;
2921		return status;
2922	}
2923
2924	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2925		return connector_status_disconnected;
2926
2927	return intel_dp_detect_dpcd(intel_dp);
2928}
2929
2930static enum drm_connector_status
2931g4x_dp_detect(struct intel_dp *intel_dp)
2932{
2933	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2934	struct drm_i915_private *dev_priv = dev->dev_private;
2935	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2936	uint32_t bit;
2937
2938	/* Can't disconnect eDP, but you can close the lid... */
2939	if (is_edp(intel_dp)) {
2940		enum drm_connector_status status;
2941
2942		status = intel_panel_detect(dev);
2943		if (status == connector_status_unknown)
2944			status = connector_status_connected;
2945		return status;
2946	}
2947
2948	switch (intel_dig_port->port) {
2949	case PORT_B:
2950		bit = PORTB_HOTPLUG_LIVE_STATUS;
2951		break;
2952	case PORT_C:
2953		bit = PORTC_HOTPLUG_LIVE_STATUS;
2954		break;
2955	case PORT_D:
2956		bit = PORTD_HOTPLUG_LIVE_STATUS;
2957		break;
2958	default:
2959		return connector_status_unknown;
2960	}
2961
2962	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2963		return connector_status_disconnected;
2964
2965	return intel_dp_detect_dpcd(intel_dp);
2966}
2967
2968static struct edid *
2969intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2970{
2971	struct intel_connector *intel_connector = to_intel_connector(connector);
2972
2973	/* use cached edid if we have one */
2974	if (intel_connector->edid) {
2975		/* invalid edid */
2976		if (IS_ERR(intel_connector->edid))
2977			return NULL;
2978
2979		return drm_edid_duplicate(intel_connector->edid);
2980	}
2981
2982	return drm_get_edid(connector, adapter);
2983}
2984
2985static int
2986intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2987{
2988	struct intel_connector *intel_connector = to_intel_connector(connector);
2989
2990	/* use cached edid if we have one */
2991	if (intel_connector->edid) {
2992		/* invalid edid */
2993		if (IS_ERR(intel_connector->edid))
2994			return 0;
2995
2996		return intel_connector_update_modes(connector,
2997						    intel_connector->edid);
2998	}
2999
3000	return intel_ddc_get_modes(connector, adapter);
3001}
3002
3003static enum drm_connector_status
3004intel_dp_detect(struct drm_connector *connector, bool force)
3005{
3006	struct intel_dp *intel_dp = intel_attached_dp(connector);
3007	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3008	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3009	struct drm_device *dev = connector->dev;
3010	enum drm_connector_status status;
3011	struct edid *edid = NULL;
3012
3013	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3014		      connector->base.id, drm_get_connector_name(connector));
3015
3016	intel_dp->has_audio = false;
3017
3018	if (HAS_PCH_SPLIT(dev))
3019		status = ironlake_dp_detect(intel_dp);
3020	else
3021		status = g4x_dp_detect(intel_dp);
3022
3023	if (status != connector_status_connected)
3024		return status;
3025
3026	intel_dp_probe_oui(intel_dp);
3027
3028	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
3029		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3030	} else {
3031		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
3032		if (edid) {
3033			intel_dp->has_audio = drm_detect_monitor_audio(edid);
3034			kfree(edid);
3035		}
3036	}
3037
3038	if (intel_encoder->type != INTEL_OUTPUT_EDP)
3039		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3040	return connector_status_connected;
3041}
3042
3043static int intel_dp_get_modes(struct drm_connector *connector)
3044{
3045	struct intel_dp *intel_dp = intel_attached_dp(connector);
3046	struct intel_connector *intel_connector = to_intel_connector(connector);
3047	struct drm_device *dev = connector->dev;
3048	int ret;
3049
3050	/* We should parse the EDID data and find out if it has an audio sink
3051	 */
3052
3053	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
3054	if (ret)
3055		return ret;
3056
3057	/* if eDP has no EDID, fall back to fixed mode */
3058	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3059		struct drm_display_mode *mode;
3060		mode = drm_mode_duplicate(dev,
3061					  intel_connector->panel.fixed_mode);
3062		if (mode) {
3063			drm_mode_probed_add(connector, mode);
3064			return 1;
3065		}
3066	}
3067	return 0;
3068}
3069
3070static bool
3071intel_dp_detect_audio(struct drm_connector *connector)
3072{
3073	struct intel_dp *intel_dp = intel_attached_dp(connector);
3074	struct edid *edid;
3075	bool has_audio = false;
3076
3077	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
3078	if (edid) {
3079		has_audio = drm_detect_monitor_audio(edid);
3080		kfree(edid);
3081	}
3082
3083	return has_audio;
3084}
3085
/*
 * Handle writes to the connector's DRM properties: force_audio,
 * broadcast RGB range and (for eDP only) the panel scaling mode.
 * Returns 0 on success or -EINVAL for unknown properties/values.
 * Any actual state change falls through to "done", which restores
 * the mode on the attached crtc so the new setting takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value in the DRM core first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op if the override didn't change. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-probes the EDID; ON/OFF force the result. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Only restore the mode if audio state actually flips. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		/* The panel fitter can't be bypassed on eDP. */
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the change immediately if the encoder is active. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
3173
3174static void
3175intel_dp_connector_destroy(struct drm_connector *connector)
3176{
3177	struct intel_connector *intel_connector = to_intel_connector(connector);
3178
3179	if (!IS_ERR_OR_NULL(intel_connector->edid))
3180		kfree(intel_connector->edid);
3181
3182	/* Can't call is_edp() since the encoder may have been destroyed
3183	 * already. */
3184	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3185		intel_panel_fini(&intel_connector->panel);
3186
3187	drm_connector_cleanup(connector);
3188	kfree(connector);
3189}
3190
3191void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3192{
3193	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
3194	struct intel_dp *intel_dp = &intel_dig_port->dp;
3195	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3196
3197	i2c_del_adapter(&intel_dp->adapter);
3198	drm_encoder_cleanup(encoder);
3199	if (is_edp(intel_dp)) {
3200		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3201		mutex_lock(&dev->mode_config.mutex);
3202		ironlake_panel_vdd_off_sync(intel_dp);
3203		mutex_unlock(&dev->mode_config.mutex);
3204	}
3205	kfree(intel_dig_port);
3206}
3207
/* DRM connector ops: DPMS, detection, property writes and teardown. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};
3215
/* Connector helper ops: mode enumeration/validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
3221
/* Encoder ops: only teardown is needed here. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
3225
3226static void
3227intel_dp_hot_plug(struct intel_encoder *intel_encoder)
3228{
3229	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3230
3231	intel_dp_check_link_status(intel_dp);
3232}
3233
3234/* Return which DP Port should be selected for Transcoder DP control */
3235int
3236intel_trans_dp_port_sel(struct drm_crtc *crtc)
3237{
3238	struct drm_device *dev = crtc->dev;
3239	struct intel_encoder *intel_encoder;
3240	struct intel_dp *intel_dp;
3241
3242	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3243		intel_dp = enc_to_intel_dp(&intel_encoder->base);
3244
3245		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3246		    intel_encoder->type == INTEL_OUTPUT_EDP)
3247			return intel_dp->output_reg;
3248	}
3249
3250	return -1;
3251}
3252
3253/* check the VBT to see whether the eDP is on DP-D port */
3254bool intel_dpd_is_edp(struct drm_device *dev)
3255{
3256	struct drm_i915_private *dev_priv = dev->dev_private;
3257	union child_device_config *p_child;
3258	int i;
3259
3260	if (!dev_priv->vbt.child_dev_num)
3261		return false;
3262
3263	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3264		p_child = dev_priv->vbt.child_dev + i;
3265
3266		if (p_child->common.dvo_port == PORT_IDPD &&
3267		    p_child->common.device_type == DEVICE_TYPE_eDP)
3268			return true;
3269	}
3270	return false;
3271}
3272
3273static void
3274intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3275{
3276	struct intel_connector *intel_connector = to_intel_connector(connector);
3277
3278	intel_attach_force_audio_property(connector);
3279	intel_attach_broadcast_rgb_property(connector);
3280	intel_dp->color_range_auto = true;
3281
3282	if (is_edp(intel_dp)) {
3283		drm_mode_create_scaling_mode_property(connector->dev);
3284		drm_object_attach_property(
3285			&connector->base,
3286			connector->dev->mode_config.scaling_mode_property,
3287			DRM_MODE_SCALE_ASPECT);
3288		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
3289	}
3290}
3291
/*
 * Determine the eDP panel power sequencing delays (t1..t12): read the
 * current values from the power sequencer registers and from the VBT,
 * take the per-field max of the two, and fall back to the eDP 1.3 spec
 * limits where both are zero.  The resulting delays are stored in
 * @intel_dp (in ms, rounded up from the hw's 100us units) and the raw
 * sequence is optionally returned through @out for later programming
 * via intel_dp_init_panel_power_sequencer_registers().
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	/* Pick the register set: PCH PP regs vs. per-pipe VLV regs. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from hw units of 100us to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
3391
/*
 * Program the panel power sequencer registers with the delays in @seq
 * (hw units of 100us): the PP_ON/PP_OFF delay pairs, the pp clock
 * divisor/power-cycle delay and, where the hardware has them, the port
 * select bits.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Reference clock for the pp divisor differs between PCH and VLV. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	/* Pick the register set: PCH PP regs vs. per-pipe VLV regs. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
		else
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
3450
/*
 * One-time eDP connector setup: read the panel power sequence, cache
 * the panel's DPCD and EDID, program the power sequencer registers and
 * pick a fixed panel mode (preferred EDID mode, or VBT as fallback).
 * Returns false if the sink looks like a ghost (no DPCD), so the caller
 * can tear the connector down again; returns true otherwise.  For
 * non-eDP connectors this is a no-op that returns true.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	if (!is_edp(intel_dp))
		return true;

	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	/* Cache DPCD and EDID for edp. */
	ironlake_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	/*
	 * Cache the EDID (or an ERR_PTR recording why it's unusable) so
	 * later detect/get_modes calls don't have to hit the DDC bus.
	 */
	edid = drm_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}

	intel_panel_init(&intel_connector->panel, fixed_mode);
	intel_panel_setup_backlight(connector);

	return true;
}
3525
/*
 * Initialize the DP/eDP connector for an already-allocated digital
 * port: classify the port as DP vs eDP, register the DRM connector,
 * set up AUX channel registers, the DDC bus and hotplug pins, and run
 * eDP panel setup.  Returns false (after tearing the connector back
 * down) if the eDP panel turns out to be a ghost.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type, error;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	/* Decide whether this port drives an internal (eDP) panel. */
	type = DRM_MODE_CONNECTOR_DisplayPort;
	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	switch (port) {
	case PORT_A:
		type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_C:
		if (IS_VALLEYVIEW(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_D:
		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	default:	/* silence GCC warning */
		break;
	}

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/*
	 * AUX channel control register: normally at a fixed offset from
	 * the port register, but DDI platforms use dedicated registers.
	 */
	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
	     error, port_name(port));

	intel_dp->psr_setup_done = false;

	/* Ghost eDP panel: unwind everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		i2c_del_adapter(&intel_dp->adapter);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			mutex_lock(&dev->mode_config.mutex);
			ironlake_panel_vdd_off_sync(intel_dp);
			mutex_unlock(&dev->mode_config.mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
3667
/*
 * Allocate and register a DP encoder + connector pair for @port whose
 * port control register is @output_reg.  Wires up the modeset hooks
 * (with VLV-specific enable paths where applicable) and delegates
 * connector setup to intel_dp_init_connector().
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	/* Common modeset hooks. */
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->mode_set = intel_dp_mode_set;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->post_disable = intel_post_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	/* Enable paths differ between VLV and the g4x/PCH platforms. */
	if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	intel_encoder->cloneable = false;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	/* Connector init failure cleans up the connector itself; we
	 * only need to drop the encoder and our allocations here. */
	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}
3721