/* intel_dp.c — revision bc7d38a43ab1af4cad1c235c3aa30426b6c7d6c5 */
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/export.h>
31#include <drm/drmP.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_edid.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38
39#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
40
41/**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct
44 *
45 * If a CPU or PCH DP output is attached to an eDP panel, this function
46 * will return true, and false otherwise.
47 */
48static bool is_edp(struct intel_dp *intel_dp)
49{
50	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
51
52	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
53}
54
55static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
56{
57	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
58
59	return intel_dig_port->base.base.dev;
60}
61
62/**
63 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
64 * @intel_dp: DP struct
65 *
66 * Returns true if the given DP struct corresponds to a CPU eDP port.
67 */
68static bool is_cpu_edp(struct intel_dp *intel_dp)
69{
70	struct drm_device *dev = intel_dp_to_dev(intel_dp);
71	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
72	enum port port = intel_dig_port->port;
73
74	return is_edp(intel_dp) &&
75		(port == PORT_A || (port == PORT_C && IS_VALLEYVIEW(dev)));
76}
77
78static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
79{
80	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
81}
82
83static void intel_dp_link_down(struct intel_dp *intel_dp);
84
85static int
86intel_dp_max_link_bw(struct intel_dp *intel_dp)
87{
88	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
89
90	switch (max_link_bw) {
91	case DP_LINK_BW_1_62:
92	case DP_LINK_BW_2_7:
93		break;
94	default:
95		max_link_bw = DP_LINK_BW_1_62;
96		break;
97	}
98	return max_link_bw;
99}
100
101/*
102 * The units on the numbers in the next two are... bizarre.  Examples will
103 * make it clearer; this one parallels an example in the eDP spec.
104 *
105 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
106 *
107 *     270000 * 1 * 8 / 10 == 216000
108 *
109 * The actual data capacity of that configuration is 2.16Gbit/s, so the
110 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
111 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
112 * 119000.  At 18bpp that's 2142000 kilobits per second.
113 *
114 * Thus the strange-looking division by 10 in intel_dp_link_required, to
115 * get the result in decakilobits instead of kilobits.
116 */
117
/*
 * Bandwidth needed by a mode, in decakilobits/s (see the block comment
 * above): kilopixels/s times bits per pixel, divided by 10 rounding up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int rate_kbps = pixel_clock * bpp;

	return (rate_kbps + 9) / 10;
}
123
/*
 * Usable payload bandwidth of a link, in decakilobits/s: 8b/10b channel
 * coding means only 8 of every 10 transmitted bits carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw_rate = max_link_clock * max_lanes;

	return raw_rate * 8 / 10;
}
129
/*
 * Validate a display mode against the sink's link capabilities and, for
 * eDP, against the panel's fixed native mode.  The specific MODE_* code
 * returned matters to userspace, so the order of checks is part of the
 * contract.
 */
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	/* eDP panels cannot scan out anything larger than their native mode. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The link always runs at the panel's native clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Check against the lowest bpp we could fall back to: 6 bpc = 18 bpp. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
167
/* Pack up to four message bytes, MSB first, into one AUX data register word. */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t word = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		word |= (uint32_t)src[i] << (24 - i * 8);

	return word;
}
180
/* Unpack an AUX data register word into up to four bytes, MSB first. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
190
191/* hrawclock is 1/4 the FSB frequency */
192static int
193intel_hrawclk(struct drm_device *dev)
194{
195	struct drm_i915_private *dev_priv = dev->dev_private;
196	uint32_t clkcfg;
197
198	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
199	if (IS_VALLEYVIEW(dev))
200		return 200;
201
202	clkcfg = I915_READ(CLKCFG);
203	switch (clkcfg & CLKCFG_FSB_MASK) {
204	case CLKCFG_FSB_400:
205		return 100;
206	case CLKCFG_FSB_533:
207		return 133;
208	case CLKCFG_FSB_667:
209		return 166;
210	case CLKCFG_FSB_800:
211		return 200;
212	case CLKCFG_FSB_1067:
213		return 266;
214	case CLKCFG_FSB_1333:
215		return 333;
216	/* these two are just a guess; one of them might be right */
217	case CLKCFG_FSB_1600:
218	case CLKCFG_FSB_1600_ALT:
219		return 400;
220	default:
221		return 133;
222	}
223}
224
225static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
226{
227	struct drm_device *dev = intel_dp_to_dev(intel_dp);
228	struct drm_i915_private *dev_priv = dev->dev_private;
229	u32 pp_stat_reg;
230
231	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
232	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
233}
234
235static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
236{
237	struct drm_device *dev = intel_dp_to_dev(intel_dp);
238	struct drm_i915_private *dev_priv = dev->dev_private;
239	u32 pp_ctrl_reg;
240
241	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
242	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
243}
244
245static void
246intel_dp_check_edp(struct intel_dp *intel_dp)
247{
248	struct drm_device *dev = intel_dp_to_dev(intel_dp);
249	struct drm_i915_private *dev_priv = dev->dev_private;
250	u32 pp_stat_reg, pp_ctrl_reg;
251
252	if (!is_edp(intel_dp))
253		return;
254
255	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
256	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
257
258	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
259		WARN(1, "eDP powered off while attempting aux channel communication.\n");
260		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
261				I915_READ(pp_stat_reg),
262				I915_READ(pp_ctrl_reg));
263	}
264}
265
/*
 * Wait for an in-flight AUX transfer to finish, either by sleeping on the
 * AUX-done interrupt or by polling, and return the final AUX channel
 * control register value (status and error bits included).
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

	/* C re-reads the control register (into status) and is true once the
	 * SEND_BUSY bit has cleared; both waiters below evaluate it repeatedly. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
289
/*
 * Perform one raw AUX channel transaction: load @send into the data
 * registers, fire the transfer and unpack any reply into @recv.
 *
 * Returns the number of bytes received, or a negative errno:
 * -EBUSY if the channel never went idle or never signalled done, -EIO on a
 * receive error, -ETIMEDOUT when the sink did not answer (normal when
 * nothing is connected).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the control reg */
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (IS_VALLEYVIEW(dev)) {
		aux_clock_divider = 100;
	} else if (intel_dig_port->port == PORT_A) {
		if (HAS_DDI(dev))
			aux_clock_divider = DIV_ROUND_CLOSEST(
				intel_ddi_get_cdclk_freq(dev_priv), 2000);
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		aux_clock_divider = 74;
	} else if (HAS_PCH_SPLIT(dev)) {
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	} else {
		aux_clock_divider = intel_hrawclk(dev) / 2;
	}

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		/* Errors mean this attempt failed; retry the whole send. */
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Drop the latency request taken at the top of the function. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}
433
434/* Write data to the aux channel in native mode */
435static int
436intel_dp_aux_native_write(struct intel_dp *intel_dp,
437			  uint16_t address, uint8_t *send, int send_bytes)
438{
439	int ret;
440	uint8_t	msg[20];
441	int msg_bytes;
442	uint8_t	ack;
443
444	intel_dp_check_edp(intel_dp);
445	if (send_bytes > 16)
446		return -1;
447	msg[0] = AUX_NATIVE_WRITE << 4;
448	msg[1] = address >> 8;
449	msg[2] = address & 0xff;
450	msg[3] = send_bytes - 1;
451	memcpy(&msg[4], send, send_bytes);
452	msg_bytes = send_bytes + 4;
453	for (;;) {
454		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
455		if (ret < 0)
456			return ret;
457		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
458			break;
459		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
460			udelay(100);
461		else
462			return -EIO;
463	}
464	return send_bytes;
465}
466
467/* Write a single byte to the aux channel in native mode */
468static int
469intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
470			    uint16_t address, uint8_t byte)
471{
472	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
473}
474
475/* read bytes from a native aux channel */
476static int
477intel_dp_aux_native_read(struct intel_dp *intel_dp,
478			 uint16_t address, uint8_t *recv, int recv_bytes)
479{
480	uint8_t msg[4];
481	int msg_bytes;
482	uint8_t reply[20];
483	int reply_bytes;
484	uint8_t ack;
485	int ret;
486
487	intel_dp_check_edp(intel_dp);
488	msg[0] = AUX_NATIVE_READ << 4;
489	msg[1] = address >> 8;
490	msg[2] = address & 0xff;
491	msg[3] = recv_bytes - 1;
492
493	msg_bytes = 4;
494	reply_bytes = recv_bytes + 1;
495
496	for (;;) {
497		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
498				      reply, reply_bytes);
499		if (ret == 0)
500			return -EPROTO;
501		if (ret < 0)
502			return ret;
503		ack = reply[0];
504		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
505			memcpy(recv, reply + 1, ret - 1);
506			return ret - 1;
507		}
508		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
509			udelay(100);
510		else
511			return -EIO;
512	}
513}
514
/*
 * i2c_algo_dp_aux transfer hook: tunnel one I2C byte (or address-only
 * start/stop) over the AUX channel.
 *
 * Each attempt yields two nested reply fields: the native AUX reply must
 * be ACK before the I2C-over-AUX reply field is even valid.  DEFER at
 * either level retries, up to 5 attempts total.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* Middle-Of-Transaction keeps the I2C transfer open across calls. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	/* Message length depends on whether a data byte travels either way. */
	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* Address-only transaction (start/stop). */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}
611
612static int
613intel_dp_i2c_init(struct intel_dp *intel_dp,
614		  struct intel_connector *intel_connector, const char *name)
615{
616	int	ret;
617
618	DRM_DEBUG_KMS("i2c_init %s\n", name);
619	intel_dp->algo.running = false;
620	intel_dp->algo.address = 0;
621	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
622
623	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
624	intel_dp->adapter.owner = THIS_MODULE;
625	intel_dp->adapter.class = I2C_CLASS_DDC;
626	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
627	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
628	intel_dp->adapter.algo_data = &intel_dp->algo;
629	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
630
631	ironlake_edp_panel_vdd_on(intel_dp);
632	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
633	ironlake_edp_panel_vdd_off(intel_dp, false);
634	return ret;
635}
636
637static void
638intel_dp_set_clock(struct intel_encoder *encoder,
639		   struct intel_crtc_config *pipe_config, int link_bw)
640{
641	struct drm_device *dev = encoder->base.dev;
642
643	if (IS_G4X(dev)) {
644		if (link_bw == DP_LINK_BW_1_62) {
645			pipe_config->dpll.p1 = 2;
646			pipe_config->dpll.p2 = 10;
647			pipe_config->dpll.n = 2;
648			pipe_config->dpll.m1 = 23;
649			pipe_config->dpll.m2 = 8;
650		} else {
651			pipe_config->dpll.p1 = 1;
652			pipe_config->dpll.p2 = 10;
653			pipe_config->dpll.n = 1;
654			pipe_config->dpll.m1 = 14;
655			pipe_config->dpll.m2 = 2;
656		}
657		pipe_config->clock_set = true;
658	} else if (IS_HASWELL(dev)) {
659		/* Haswell has special-purpose DP DDI clocks. */
660	} else if (HAS_PCH_SPLIT(dev)) {
661		if (link_bw == DP_LINK_BW_1_62) {
662			pipe_config->dpll.n = 1;
663			pipe_config->dpll.p1 = 2;
664			pipe_config->dpll.p2 = 10;
665			pipe_config->dpll.m1 = 12;
666			pipe_config->dpll.m2 = 9;
667		} else {
668			pipe_config->dpll.n = 2;
669			pipe_config->dpll.p1 = 1;
670			pipe_config->dpll.p2 = 10;
671			pipe_config->dpll.m1 = 14;
672			pipe_config->dpll.m2 = 8;
673		}
674		pipe_config->clock_set = true;
675	} else if (IS_VALLEYVIEW(dev)) {
676		/* FIXME: Need to figure out optimized DP clocks for vlv. */
677	}
678}
679
/*
 * Compute the crtc/encoder configuration for a DP mode set: pick the
 * lowest (bpp, link rate, lane count) combination whose bandwidth fits
 * the target mode, then derive m/n values and DPLL settings from it.
 * Returns false if no combination works or the mode is unusable.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	/* Index into bws[]: 0 = 1.62GHz only, 1 = up to 2.7GHz. */
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int target_clock, link_avail, link_clock;

	/* Everything except CPU eDP on port A goes through the PCH on
	 * pre-DDI PCH-split platforms. */
	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	/* eDP: force the panel's fixed mode and set up panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}
	/* We need to take the panel's fixed mode into account. */
	target_clock = adjusted_mode->clock;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);

	/* Search order prefers high bpp, then low link rate, then few lanes. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(target_clock, bpp);

		for (clock = 0; clock <= max_clock; clock++) {
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
	pipe_config->pipe_bpp = bpp;
	pipe_config->pixel_target_clock = target_clock;

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      adjusted_mode->clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       target_clock, adjusted_mode->clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
783
784void intel_dp_init_link_config(struct intel_dp *intel_dp)
785{
786	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
787	intel_dp->link_configuration[0] = intel_dp->link_bw;
788	intel_dp->link_configuration[1] = intel_dp->lane_count;
789	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
790	/*
791	 * Check for DPCD version > 1.1 and enhanced framing support
792	 */
793	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
794	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
795		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
796	}
797}
798
799static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
800{
801	struct drm_device *dev = crtc->dev;
802	struct drm_i915_private *dev_priv = dev->dev_private;
803	u32 dpa_ctl;
804
805	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
806	dpa_ctl = I915_READ(DP_A);
807	dpa_ctl &= ~DP_PLL_FREQ_MASK;
808
809	if (clock < 200000) {
810		/* For a long time we've carried around a ILK-DevA w/a for the
811		 * 160MHz clock. If we're really unlucky, it's still required.
812		 */
813		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
814		dpa_ctl |= DP_PLL_FREQ_160MHZ;
815	} else {
816		dpa_ctl |= DP_PLL_FREQ_270MHZ;
817	}
818
819	I915_WRITE(DP_A, dpa_ctl);
820
821	POSTING_READ(DP_A);
822	udelay(500);
823}
824
/*
 * Assemble intel_dp->DP, the value for the port's DP control register,
 * from the trained link parameters and the adjusted mode, and enable the
 * CPU eDP PLL where needed.  The register is not written here; later
 * enable hooks write intel_dp->DP out.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU eDP (port A) uses the CPT-style layout. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU register layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (port == PORT_A && !IS_VALLEYVIEW(dev)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		/* CPT PCH: most settings live in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
920
921#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
922#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
923
924#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
925#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
926
927#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
928#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
929
930static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
931				       u32 mask,
932				       u32 value)
933{
934	struct drm_device *dev = intel_dp_to_dev(intel_dp);
935	struct drm_i915_private *dev_priv = dev->dev_private;
936	u32 pp_stat_reg, pp_ctrl_reg;
937
938	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
939	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
940
941	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
942			mask, value,
943			I915_READ(pp_stat_reg),
944			I915_READ(pp_ctrl_reg));
945
946	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
947		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
948				I915_READ(pp_stat_reg),
949				I915_READ(pp_ctrl_reg));
950	}
951}
952
/* Block until the power sequencer reports the panel fully on and idle. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
958
/* Block until the power sequencer reports the panel fully off and idle. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
964
/* Block until the panel's mandatory power-cycle delay has elapsed. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
970
971
972/* Read the current pp_control value, unlocking the register if it
973 * is locked
974 */
975
976static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
977{
978	struct drm_device *dev = intel_dp_to_dev(intel_dp);
979	struct drm_i915_private *dev_priv = dev->dev_private;
980	u32 control;
981	u32 pp_ctrl_reg;
982
983	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
984	control = I915_READ(pp_ctrl_reg);
985
986	control &= ~PANEL_UNLOCK_MASK;
987	control |= PANEL_UNLOCK_REGS;
988	return control;
989}
990
/*
 * Force panel VDD on so AUX transfers work while the panel itself is off.
 * Records the request in want_panel_vdd; every call must be balanced by
 * ironlake_edp_panel_vdd_off().  No-op for non-eDP ports.
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	/* Unbalanced on/off calls indicate a refcounting bug in the caller. */
	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	/* VDD may still be up from a recent off request's grace period. */
	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Honour the panel's mandatory power-cycle delay before re-powering. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}
1033
/*
 * Actually drop the force-VDD bit, but only if nobody still holds a VDD
 * reference and the hardware currently has VDD forced on.  Caller must
 * hold mode_config.mutex.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	/* Skip if someone re-requested VDD or it was never forced on. */
	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
		msleep(intel_dp->panel_power_down_delay);
	}
}
1059
1060static void ironlake_panel_vdd_work(struct work_struct *__work)
1061{
1062	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1063						 struct intel_dp, panel_vdd_work);
1064	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1065
1066	mutex_lock(&dev->mode_config.mutex);
1067	ironlake_panel_vdd_off_sync(intel_dp);
1068	mutex_unlock(&dev->mode_config.mutex);
1069}
1070
1071void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1072{
1073	if (!is_edp(intel_dp))
1074		return;
1075
1076	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1077	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1078
1079	intel_dp->want_panel_vdd = false;
1080
1081	if (sync) {
1082		ironlake_panel_vdd_off_sync(intel_dp);
1083	} else {
1084		/*
1085		 * Queue the timer to fire a long
1086		 * time from now (relative to the power down delay)
1087		 * to keep the panel power up across a sequence of operations
1088		 */
1089		schedule_delayed_work(&intel_dp->panel_vdd_work,
1090				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1091	}
1092}
1093
/*
 * Power the eDP panel up via the panel power sequencer and wait until it
 * reports fully on.  No-op on non-eDP ports or if already powered.
 */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	/* Honour the panel's required off time before powering back up. */
	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	/* Second half of the ILK workaround: re-enable reset when done. */
	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}
1138
/*
 * Power the eDP panel down.  The caller must hold a VDD reference (see
 * WARN below); the force-VDD bit is dropped together with panel power and
 * the want_panel_vdd bookkeeping is cleared here.
 */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* VDD was just dropped in hardware along with panel power. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}
1167
/*
 * Enable the eDP backlight: wait for the panel to sync with the link,
 * set the sequencer's backlight-enable bit, then enable the platform
 * backlight controller.  No-op on non-eDP ports.
 */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(dev, pipe);
}
1198
/*
 * Disable the eDP backlight: turn off the platform backlight controller,
 * clear the sequencer's backlight-enable bit, then wait out the panel's
 * backlight-off delay.  No-op on non-eDP ports.
 */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	/* Give the backlight time to actually turn off before panel power
	 * may be cut. */
	msleep(intel_dp->backlight_off_delay);
}
1221
/*
 * Enable the CPU eDP PLL (DP_A).  Must be called with the pipe disabled
 * and the port off; the PLL needs to be running before the port is
 * enabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Allow the PLL time to lock. */
	udelay(200);
}
1247
/*
 * Disable the CPU eDP PLL (DP_A).  Must be called with the pipe disabled
 * and the port already off.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* Allow the PLL time to settle after disabling. */
	udelay(200);
}
1272
1273/* If the sink supports it, try to set the power state appropriately */
1274void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1275{
1276	int ret, i;
1277
1278	/* Should have a valid DPCD by this point */
1279	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1280		return;
1281
1282	if (mode != DRM_MODE_DPMS_ON) {
1283		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1284						  DP_SET_POWER_D3);
1285		if (ret != 1)
1286			DRM_DEBUG_DRIVER("failed to write sink power state\n");
1287	} else {
1288		/*
1289		 * When turning on, we need to retry for 1ms to give the sink
1290		 * time to wake up.
1291		 */
1292		for (i = 0; i < 3; i++) {
1293			ret = intel_dp_aux_native_write_1(intel_dp,
1294							  DP_SET_POWER,
1295							  DP_SET_POWER_D0);
1296			if (ret == 1)
1297				break;
1298			msleep(1);
1299		}
1300	}
1301}
1302
/*
 * Read back whether this DP port is enabled and, if so, which pipe drives
 * it.  Returns false when the port is off; returns true (with *pipe set
 * where determinable) when it is on.  On CPT the pipe must be recovered
 * from the transcoder DP-port-select fields rather than the port register.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	/* IVB CPU eDP encodes the pipe CPT-style in the port register. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: scan the transcoders for one selecting this port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			/* Unknown register: report enabled without a pipe. */
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
1352
1353static void intel_dp_get_config(struct intel_encoder *encoder,
1354				struct intel_crtc_config *pipe_config)
1355{
1356	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1357	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1358	u32 tmp, flags = 0;
1359
1360	tmp = I915_READ(intel_dp->output_reg);
1361
1362	if (tmp & DP_SYNC_HS_HIGH)
1363		flags |= DRM_MODE_FLAG_PHSYNC;
1364	else
1365		flags |= DRM_MODE_FLAG_NHSYNC;
1366
1367	if (tmp & DP_SYNC_VS_HIGH)
1368		flags |= DRM_MODE_FLAG_PVSYNC;
1369	else
1370		flags |= DRM_MODE_FLAG_NVSYNC;
1371
1372	pipe_config->adjusted_mode.flags |= flags;
1373}
1374
/*
 * Encoder disable hook: shut the panel down in the required order
 * (backlight, sink power state, panel power), then take the link down for
 * PCH ports.  CPU eDP / VLV link-down happens in the post-disable hook.
 *
 * NOTE(review): the sink is set to DRM_MODE_DPMS_ON here, in the disable
 * path — presumably to keep the sink responsive while the panel is being
 * powered off; confirm against the DPMS sequencing requirements.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
1392
/*
 * Post-disable hook, runs after the pipe/plane are off: take down the
 * link for CPU eDP (PORT_A) and VLV ports, and on non-VLV also disable
 * the CPU eDP PLL afterwards.
 */
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	if (port == PORT_A || IS_VALLEYVIEW(dev)) {
		intel_dp_link_down(intel_dp);
		if (!IS_VALLEYVIEW(dev))
			ironlake_edp_pll_off(intel_dp);
	}
}
1405
/*
 * Encoder enable hook: bring up the panel, wake the sink, and run link
 * training.  Note the ordering — VDD is forced on for the AUX traffic,
 * training pattern 1 starts before panel power (some panels need it),
 * and the backlight only comes on once training has completed.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must still be off at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		struct intel_digital_port *dport =
			enc_to_dig_port(&encoder->base);
		int channel = vlv_dport_to_channel(dport);

		/* VLV: wait for the PHY to report the port ready. */
		vlv_wait_port_ready(dev_priv, channel);
	}
}
1433
/*
 * Pre-enable hook: turn on the CPU eDP PLL on ILK-class hardware, and on
 * VLV program the DPIO lane/clock-buffer registers for this channel.
 */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_edp_pll_on(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(encoder->base.crtc);
		int port = vlv_dport_to_channel(dport);
		int pipe = intel_crtc->pipe;
		u32 val;

		/* NOTE(review): the value read here is immediately
		 * discarded; presumably the read itself is required (or the
		 * discard is deliberate) — confirm against VLV DPIO docs. */
		val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
		val = 0;
		/* Bit 21 selects which pipe feeds this data channel. */
		if (pipe)
			val |= (1<<21);
		else
			val &= ~(1<<21);
		val |= 0x001000c4;
		vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);

		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
				 0x00760018);
		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
				 0x00400888);
	}
}
1466
/*
 * Pre-PLL-enable hook (VLV only): put the PHY Tx lanes into their default
 * reset state and apply the inter-pair skew workaround before the PLL is
 * turned on.  No-op on non-VLV platforms.
 */
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int port = vlv_dport_to_channel(dport);

	if (!IS_VALLEYVIEW(dev))
		return;

	/* Program Tx lane resets to default */
	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
}
1492
1493/*
1494 * Native read with retry for link status and receiver capability reads for
1495 * cases where the sink may still be asleep.
1496 */
1497static bool
1498intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1499			       uint8_t *recv, int recv_bytes)
1500{
1501	int ret, i;
1502
1503	/*
1504	 * Sinks are *supposed* to come up within 1ms from an off state,
1505	 * but we're also supposed to retry 3 times per the spec.
1506	 */
1507	for (i = 0; i < 3; i++) {
1508		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1509					       recv_bytes);
1510		if (ret == recv_bytes)
1511			return true;
1512		msleep(1);
1513	}
1514
1515	return false;
1516}
1517
1518/*
1519 * Fetch AUX CH registers 0x202 - 0x207 which contain
1520 * link status information
1521 */
1522static bool
1523intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1524{
1525	return intel_dp_aux_native_read_retry(intel_dp,
1526					      DP_LANE0_1_STATUS,
1527					      link_status,
1528					      DP_LINK_STATUS_SIZE);
1529}
1530
1531#if 0
1532static char	*voltage_names[] = {
1533	"0.4V", "0.6V", "0.8V", "1.2V"
1534};
1535static char	*pre_emph_names[] = {
1536	"0dB", "3.5dB", "6dB", "9.5dB"
1537};
1538static char	*link_train_names[] = {
1539	"pattern 1", "pattern 2", "idle", "off"
1540};
1541#endif
1542
1543/*
1544 * These are source-specific values; current Intel hardware supports
1545 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1546 */
1547
1548static uint8_t
1549intel_dp_voltage_max(struct intel_dp *intel_dp)
1550{
1551	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1552	enum port port = dp_to_dig_port(intel_dp)->port;
1553
1554	if (IS_VALLEYVIEW(dev))
1555		return DP_TRAIN_VOLTAGE_SWING_1200;
1556	else if (IS_GEN7(dev) && port == PORT_A)
1557		return DP_TRAIN_VOLTAGE_SWING_800;
1558	else if (HAS_PCH_CPT(dev) && port != PORT_A)
1559		return DP_TRAIN_VOLTAGE_SWING_1200;
1560	else
1561		return DP_TRAIN_VOLTAGE_SWING_800;
1562}
1563
1564static uint8_t
1565intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1566{
1567	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1568	enum port port = dp_to_dig_port(intel_dp)->port;
1569
1570	if (HAS_DDI(dev)) {
1571		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1572		case DP_TRAIN_VOLTAGE_SWING_400:
1573			return DP_TRAIN_PRE_EMPHASIS_9_5;
1574		case DP_TRAIN_VOLTAGE_SWING_600:
1575			return DP_TRAIN_PRE_EMPHASIS_6;
1576		case DP_TRAIN_VOLTAGE_SWING_800:
1577			return DP_TRAIN_PRE_EMPHASIS_3_5;
1578		case DP_TRAIN_VOLTAGE_SWING_1200:
1579		default:
1580			return DP_TRAIN_PRE_EMPHASIS_0;
1581		}
1582	} else if (IS_VALLEYVIEW(dev)) {
1583		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1584		case DP_TRAIN_VOLTAGE_SWING_400:
1585			return DP_TRAIN_PRE_EMPHASIS_9_5;
1586		case DP_TRAIN_VOLTAGE_SWING_600:
1587			return DP_TRAIN_PRE_EMPHASIS_6;
1588		case DP_TRAIN_VOLTAGE_SWING_800:
1589			return DP_TRAIN_PRE_EMPHASIS_3_5;
1590		case DP_TRAIN_VOLTAGE_SWING_1200:
1591		default:
1592			return DP_TRAIN_PRE_EMPHASIS_0;
1593		}
1594	} else if (IS_GEN7(dev) && port == PORT_A) {
1595		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1596		case DP_TRAIN_VOLTAGE_SWING_400:
1597			return DP_TRAIN_PRE_EMPHASIS_6;
1598		case DP_TRAIN_VOLTAGE_SWING_600:
1599		case DP_TRAIN_VOLTAGE_SWING_800:
1600			return DP_TRAIN_PRE_EMPHASIS_3_5;
1601		default:
1602			return DP_TRAIN_PRE_EMPHASIS_0;
1603		}
1604	} else {
1605		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1606		case DP_TRAIN_VOLTAGE_SWING_400:
1607			return DP_TRAIN_PRE_EMPHASIS_6;
1608		case DP_TRAIN_VOLTAGE_SWING_600:
1609			return DP_TRAIN_PRE_EMPHASIS_6;
1610		case DP_TRAIN_VOLTAGE_SWING_800:
1611			return DP_TRAIN_PRE_EMPHASIS_3_5;
1612		case DP_TRAIN_VOLTAGE_SWING_1200:
1613		default:
1614			return DP_TRAIN_PRE_EMPHASIS_0;
1615		}
1616	}
1617}
1618
/*
 * Program the VLV PHY (via DPIO) for the swing/pre-emphasis combination
 * in train_set[0].  The register values are opaque tuning constants for
 * each supported (pre-emphasis, swing) pair; unsupported combinations
 * bail out without touching the hardware.  Always returns 0 — the caller
 * masks the result with 0 since the levels live in DPIO, not the port
 * register.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	int port = vlv_dport_to_channel(dport);

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Latch the new values: clear OCALINIT, write the tuning registers,
	 * then set OCALINIT to apply. */
	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);

	return 0;
}
1713
1714static void
1715intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1716{
1717	uint8_t v = 0;
1718	uint8_t p = 0;
1719	int lane;
1720	uint8_t voltage_max;
1721	uint8_t preemph_max;
1722
1723	for (lane = 0; lane < intel_dp->lane_count; lane++) {
1724		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1725		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
1726
1727		if (this_v > v)
1728			v = this_v;
1729		if (this_p > p)
1730			p = this_p;
1731	}
1732
1733	voltage_max = intel_dp_voltage_max(intel_dp);
1734	if (v >= voltage_max)
1735		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1736
1737	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1738	if (p >= preemph_max)
1739		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1740
1741	for (lane = 0; lane < 4; lane++)
1742		intel_dp->train_set[lane] = v | p;
1743}
1744
1745static uint32_t
1746intel_gen4_signal_levels(uint8_t train_set)
1747{
1748	uint32_t	signal_levels = 0;
1749
1750	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1751	case DP_TRAIN_VOLTAGE_SWING_400:
1752	default:
1753		signal_levels |= DP_VOLTAGE_0_4;
1754		break;
1755	case DP_TRAIN_VOLTAGE_SWING_600:
1756		signal_levels |= DP_VOLTAGE_0_6;
1757		break;
1758	case DP_TRAIN_VOLTAGE_SWING_800:
1759		signal_levels |= DP_VOLTAGE_0_8;
1760		break;
1761	case DP_TRAIN_VOLTAGE_SWING_1200:
1762		signal_levels |= DP_VOLTAGE_1_2;
1763		break;
1764	}
1765	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1766	case DP_TRAIN_PRE_EMPHASIS_0:
1767	default:
1768		signal_levels |= DP_PRE_EMPHASIS_0;
1769		break;
1770	case DP_TRAIN_PRE_EMPHASIS_3_5:
1771		signal_levels |= DP_PRE_EMPHASIS_3_5;
1772		break;
1773	case DP_TRAIN_PRE_EMPHASIS_6:
1774		signal_levels |= DP_PRE_EMPHASIS_6;
1775		break;
1776	case DP_TRAIN_PRE_EMPHASIS_9_5:
1777		signal_levels |= DP_PRE_EMPHASIS_9_5;
1778		break;
1779	}
1780	return signal_levels;
1781}
1782
/* Gen6's DP voltage swing and pre-emphasis control.
 * Maps a train_set swing/pre-emphasis combination onto the SNB register
 * encoding; several combinations intentionally share an encoding, and
 * unsupported ones fall back to the lowest setting with a debug message.
 */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
1810
/* Gen7's DP voltage swing and pre-emphasis control.
 * Maps a train_set swing/pre-emphasis combination onto the IVB register
 * encoding; unsupported combinations fall back to 500mV/0dB with a
 * debug message.
 */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
1841
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control.
 * Maps a train_set swing/pre-emphasis combination onto the DDI buffer
 * translation encoding; unsupported combinations fall back to 400mV/0dB
 * with a debug message.
 */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
1875
/* Properly updates "DP" with the correct signal levels.
 * Picks the platform-specific translation of train_set[0] and merges it
 * into *DP under the matching bit mask.  On VLV the levels are written
 * directly to DPIO, so signal_levels and mask are both 0 and *DP is left
 * unchanged.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
1907
/*
 * Program the given training pattern on both ends of the link: write the
 * platform-specific pattern bits to the port (or DP_TP_CTL on DDI), tell
 * the sink via DP_TRAINING_PATTERN_SET, and — while training is active —
 * push the per-lane drive settings from train_set.  Returns false if the
 * lane-set AUX write comes up short.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;

	if (HAS_DDI(dev)) {
		/* DDI: training pattern lives in DP_TP_CTL, not the port. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		/* CPT-style pattern field in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Fall back to pattern 2 since 3 is unsupported. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy pattern field in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Fall back to pattern 2 since 3 is unsupported. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Mirror the pattern to the sink. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	/* While training, also push the per-lane drive settings. */
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}
2003
/*
 * Switch a DDI port's DP transport into idle-pattern transmission and,
 * except on PORT_A, wait for the hardware to report that idle patterns
 * have gone out. No-op on platforms without DDI.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Read-modify-write DP_TP_CTL: replace whatever link-training mode
	 * is currently selected with idle-pattern transmission. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1ms timeout; failure here is logged but not fatal. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
2034
/*
 * Enable the corresponding port and run the clock-recovery phase of DP
 * link training (training pattern 1).
 *
 * Iterates, adjusting voltage swing / pre-emphasis as requested by the
 * sink, until clock recovery is reported OK, the same voltage has been
 * tried 5 times, or the sweep has been restarted from max-swing 5 times.
 * The resulting port register value is cached back into intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	/* Start the sweep from zero swing / zero pre-emphasis on all lanes. */
	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		/* Per-sink mandated delay before sampling the lane status. */
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes are at max swing: restart the whole sweep
			 * from zero, up to 5 times. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}
2118
/*
 * Run the channel-equalization phase of DP link training (training
 * pattern 2); clock recovery is assumed to have succeeded already.
 *
 * If clock recovery is lost, or equalization keeps failing, falls back
 * to a full retrain via intel_dp_start_link_train(); gives up and takes
 * the link down after too many such clock-recovery retries.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];

		/* Abort after too many clock-recovery fallbacks. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		/* Per-sink mandated delay before sampling the lane status. */
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	/* DDI HW wants idle patterns between training and normal output. */
	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2185
/*
 * End link training: switch the port back to normal pixel output and
 * tell the sink (via DPCD TRAINING_PATTERN_SET) that training is over.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
2191
/*
 * Take the DP link down: put the port into the idle training pattern,
 * then disable it, applying the IBX transcoder-select workaround on the
 * way. On DDI platforms this is deliberately a no-op — see the comment
 * in the body.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Drop into the idle training pattern; the mask/bit layout differs
	 * between CPT PCH ports and the others. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Finally turn audio and the port itself off, then honour the panel
	 * power-down delay before anyone touches the panel again. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2271
/*
 * Read the sink's receiver-capability block (the first sizeof(dpcd)
 * bytes of the DPCD) into intel_dp->dpcd, plus the per-port downstream
 * port info when a branch device that provides it is present.
 *
 * Returns false when the AUX transfer fails or the DPCD revision byte
 * reads as zero (no DPCD present).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	/* Dump the raw capability bytes for debugging. */
	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2302
/*
 * Read and log the sink and branch IEEE OUIs, purely for debugging.
 * Skipped unless the sink advertises OUI support. The AUX reads are
 * bracketed with VDD on/off so they work on an eDP panel that is
 * otherwise powered down.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	/* false: let the delayed-work path turn VDD off again */
	ironlake_edp_panel_vdd_off(intel_dp, false);
}
2323
2324static bool
2325intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2326{
2327	int ret;
2328
2329	ret = intel_dp_aux_native_read_retry(intel_dp,
2330					     DP_DEVICE_SERVICE_IRQ_VECTOR,
2331					     sink_irq_vector, 1);
2332	if (!ret)
2333		return false;
2334
2335	return true;
2336}
2337
/*
 * Respond to a sink-initiated automated test request. We implement no
 * tests, so unconditionally NAK via DP_TEST_RESPONSE.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2344
2345/*
2346 * According to DP spec
2347 * 5.1.2:
2348 *  1. Read DPCD
2349 *  2. Configure link according to Receiver Capabilities
2350 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
2351 *  4. Check link status on receipt of hot-plug interrupt
2352 */
2353
/*
 * Revalidate an active DP link (typically after a hotplug interrupt):
 * take the link down if the sink has vanished, service sink IRQs such
 * as automated test requests, and retrain when channel equalization has
 * been lost.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to check if the output isn't active. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Link dropped out of spec: run the full retrain sequence. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
2401
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connection status from the DPCD: a native sink counts as
 * connected outright; behind a branch device we consult SINK_COUNT
 * (when HPD-aware) or probe DDC, and report "unknown" for port types
 * where neither is reliable.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	/* Refresh the DPCD; a failed read means nothing is there. */
	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
2441
2442static enum drm_connector_status
2443ironlake_dp_detect(struct intel_dp *intel_dp)
2444{
2445	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2446	struct drm_i915_private *dev_priv = dev->dev_private;
2447	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2448	enum drm_connector_status status;
2449
2450	/* Can't disconnect eDP, but you can close the lid... */
2451	if (is_edp(intel_dp)) {
2452		status = intel_panel_detect(dev);
2453		if (status == connector_status_unknown)
2454			status = connector_status_connected;
2455		return status;
2456	}
2457
2458	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2459		return connector_status_disconnected;
2460
2461	return intel_dp_detect_dpcd(intel_dp);
2462}
2463
/*
 * Detect hook for pre-PCH (G4x-style) platforms: eDP reports panel
 * state, external ports check the per-port hotplug live-status bit in
 * PORT_HOTPLUG_STAT and then confirm via the DPCD.
 */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	/* Map the port to its live-status bit; PORT_A has none here. */
	switch (intel_dig_port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}
2501
2502static struct edid *
2503intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2504{
2505	struct intel_connector *intel_connector = to_intel_connector(connector);
2506
2507	/* use cached edid if we have one */
2508	if (intel_connector->edid) {
2509		struct edid *edid;
2510		int size;
2511
2512		/* invalid edid */
2513		if (IS_ERR(intel_connector->edid))
2514			return NULL;
2515
2516		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2517		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
2518		if (!edid)
2519			return NULL;
2520
2521		return edid;
2522	}
2523
2524	return drm_get_edid(connector, adapter);
2525}
2526
2527static int
2528intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2529{
2530	struct intel_connector *intel_connector = to_intel_connector(connector);
2531
2532	/* use cached edid if we have one */
2533	if (intel_connector->edid) {
2534		/* invalid edid */
2535		if (IS_ERR(intel_connector->edid))
2536			return 0;
2537
2538		return intel_connector_update_modes(connector,
2539						    intel_connector->edid);
2540	}
2541
2542	return intel_ddc_get_modes(connector, adapter);
2543}
2544
/*
 * Connector ->detect() hook: run the platform-specific presence check
 * (hotplug state plus DPCD) and, when a sink is found, refresh the
 * cached audio capability from the EDID or the force_audio override.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		return status;

	/* Debug-log the sink/branch OUIs now that we know something's there. */
	intel_dp_probe_oui(intel_dp);

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	/* Don't let a DP detection demote an eDP encoder. */
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	return connector_status_connected;
}
2581
/*
 * Connector ->get_modes() hook: add modes from the (possibly cached)
 * EDID; for an eDP panel without a usable EDID, fall back to the fixed
 * panel mode. Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret)
		return ret;

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}
2608
2609static bool
2610intel_dp_detect_audio(struct drm_connector *connector)
2611{
2612	struct intel_dp *intel_dp = intel_attached_dp(connector);
2613	struct edid *edid;
2614	bool has_audio = false;
2615
2616	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2617	if (edid) {
2618		has_audio = drm_detect_monitor_audio(edid);
2619		kfree(edid);
2620	}
2621
2622	return has_audio;
2623}
2624
/*
 * Connector ->set_property() hook. Handles three properties: force
 * audio, broadcast RGB (color range), and — for eDP only — the panel
 * scaling mode. When a value actually changes, the mode on the attached
 * crtc is restored so the new setting takes effect immediately.
 *
 * Returns 0 on success (including no-op changes), -EINVAL for unknown
 * properties or rejected values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* "auto" means: ask the monitor's EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the mode restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
2712
2713static void
2714intel_dp_destroy(struct drm_connector *connector)
2715{
2716	struct intel_dp *intel_dp = intel_attached_dp(connector);
2717	struct intel_connector *intel_connector = to_intel_connector(connector);
2718
2719	if (!IS_ERR_OR_NULL(intel_connector->edid))
2720		kfree(intel_connector->edid);
2721
2722	if (is_edp(intel_dp))
2723		intel_panel_fini(&intel_connector->panel);
2724
2725	drm_sysfs_connector_remove(connector);
2726	drm_connector_cleanup(connector);
2727	kfree(connector);
2728}
2729
/*
 * Encoder ->destroy() hook: unregister the AUX i2c adapter, and for eDP
 * flush any pending delayed VDD-off work and force VDD off (under the
 * mode_config mutex) before freeing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}
2746
/* Encoder helpers: only the mode_set hook is needed for DP. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_set = intel_dp_mode_set,
};

/* Connector vtable wiring the detect/property/destroy hooks above. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

/* Connector helper vtable: mode enumeration and validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* Encoder vtable: only destruction is non-default. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
2768
2769static void
2770intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2771{
2772	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2773
2774	intel_dp_check_link_status(intel_dp);
2775}
2776
2777/* Return which DP Port should be selected for Transcoder DP control */
2778int
2779intel_trans_dp_port_sel(struct drm_crtc *crtc)
2780{
2781	struct drm_device *dev = crtc->dev;
2782	struct intel_encoder *intel_encoder;
2783	struct intel_dp *intel_dp;
2784
2785	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2786		intel_dp = enc_to_intel_dp(&intel_encoder->base);
2787
2788		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2789		    intel_encoder->type == INTEL_OUTPUT_EDP)
2790			return intel_dp->output_reg;
2791	}
2792
2793	return -1;
2794}
2795
2796/* check the VBT to see whether the eDP is on DP-D port */
2797bool intel_dpd_is_edp(struct drm_device *dev)
2798{
2799	struct drm_i915_private *dev_priv = dev->dev_private;
2800	struct child_device_config *p_child;
2801	int i;
2802
2803	if (!dev_priv->vbt.child_dev_num)
2804		return false;
2805
2806	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
2807		p_child = dev_priv->vbt.child_dev + i;
2808
2809		if (p_child->dvo_port == PORT_IDPD &&
2810		    p_child->device_type == DEVICE_TYPE_eDP)
2811			return true;
2812	}
2813	return false;
2814}
2815
/*
 * Attach the DP connector properties: force-audio and broadcast-RGB for
 * every port, plus the panel scaling-mode property (defaulting to
 * aspect-ratio scaling) for eDP panels.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
2834
/*
 * Compute the eDP panel power-sequencing delays. For each delay we take
 * the maximum of the value currently programmed in the hardware and the
 * VBT value, falling back to the eDP spec upper limit when both are
 * zero. The per-panel delays are stored on @intel_dp in milliseconds;
 * if @out is non-NULL the chosen raw values (hardware units of 100us)
 * are returned for later programming by
 * intel_dp_init_panel_power_sequencer_registers().
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	/* The sequencer registers live in different places on PCH-split
	 * platforms vs the others. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_control_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		pp_control_reg = PIPEA_PP_CONTROL;
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_control_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hardware units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
2932
/*
 * Program the panel power sequencer registers from the delays chosen by
 * intel_dp_init_panel_power_sequencer(): on/off delays, the power-cycle
 * delay, the pp clock divisor derived from the raw clock, and — where
 * the hardware has them — the panel port select bits.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_POWER_PORT_DP_A;
		else
			port_sel = PANEL_POWER_PORT_DP_D;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read the registers back so the log shows what actually stuck. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
2986
/*
 * intel_dp_init_connector - set up the connector half of a DP/eDP port
 * @intel_dig_port: the digital port this connector belongs to
 * @intel_connector: pre-allocated connector to initialize
 *
 * Decides whether the port drives an external DP sink or a built-in eDP
 * panel, registers the drm connector, wires up the AUX channel and DDC
 * bus, and — for eDP — probes the panel (DPCD, EDID, fixed mode) with
 * the panel power sequencer programmed along the way.  If the eDP DPCD
 * read fails the port is treated as a ghost and torn down again.
 */
void
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	/* Default to external DP; the switch below upgrades known eDP ports. */
	type = DRM_MODE_CONNECTOR_DisplayPort;
	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	switch (port) {
	case PORT_A:
		type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_C:
		if (IS_VALLEYVIEW(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	case PORT_D:
		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
			type = DRM_MODE_CONNECTOR_eDP;
		break;
	default:	/* silence GCC warning */
		break;
	}

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred work used to drop panel VDD after a grace period. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/*
	 * AUX channel control register normally sits at a fixed offset from
	 * the port control register; DDI platforms use dedicated registers
	 * instead, selected per port below.
	 */
	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/* Read out the current power sequencer delays before touching AUX. */
	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache DPCD and EDID for edp. */
	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		/* DPCD is read over AUX, which needs panel VDD force-enabled. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_encoder->base);
			intel_dp_destroy(connector);
			return;
		}

		/* We now know it's not a ghost, init power sequence regs. */
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);

		/* EDID is also fetched over AUX/DDC, so keep VDD up for it. */
		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				/* Unusable EDID: cache an error so later
				 * probes don't retry the read. */
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
3179
3180void
3181intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3182{
3183	struct intel_digital_port *intel_dig_port;
3184	struct intel_encoder *intel_encoder;
3185	struct drm_encoder *encoder;
3186	struct intel_connector *intel_connector;
3187
3188	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
3189	if (!intel_dig_port)
3190		return;
3191
3192	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
3193	if (!intel_connector) {
3194		kfree(intel_dig_port);
3195		return;
3196	}
3197
3198	intel_encoder = &intel_dig_port->base;
3199	encoder = &intel_encoder->base;
3200
3201	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3202			 DRM_MODE_ENCODER_TMDS);
3203	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
3204
3205	intel_encoder->compute_config = intel_dp_compute_config;
3206	intel_encoder->enable = intel_enable_dp;
3207	intel_encoder->pre_enable = intel_pre_enable_dp;
3208	intel_encoder->disable = intel_disable_dp;
3209	intel_encoder->post_disable = intel_post_disable_dp;
3210	intel_encoder->get_hw_state = intel_dp_get_hw_state;
3211	intel_encoder->get_config = intel_dp_get_config;
3212	if (IS_VALLEYVIEW(dev))
3213		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
3214
3215	intel_dig_port->port = port;
3216	intel_dig_port->dp.output_reg = output_reg;
3217
3218	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3219	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3220	intel_encoder->cloneable = false;
3221	intel_encoder->hot_plug = intel_dp_hot_plug;
3222
3223	intel_dp_init_connector(intel_dig_port, intel_connector);
3224}
3225