intel_dp.c revision 0f037bdee1a12947a0c55b21a05f57793332bc07
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/export.h>
31#include <drm/drmP.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_edid.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38
39#define DP_RECEIVER_CAP_SIZE	0xf
40#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
41
42/**
43 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
44 * @intel_dp: DP struct
45 *
46 * If a CPU or PCH DP output is attached to an eDP panel, this function
47 * will return true, and false otherwise.
48 */
49static bool is_edp(struct intel_dp *intel_dp)
50{
51	return intel_dp->base.type == INTEL_OUTPUT_EDP;
52}
53
54/**
55 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
56 * @intel_dp: DP struct
57 *
58 * Returns true if the given DP struct corresponds to a PCH DP port attached
59 * to an eDP panel, false otherwise.  Helpful for determining whether we
60 * may need FDI resources for a given DP output or not.
61 */
62static bool is_pch_edp(struct intel_dp *intel_dp)
63{
64	return intel_dp->is_pch_edp;
65}
66
67/**
68 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
69 * @intel_dp: DP struct
70 *
71 * Returns true if the given DP struct corresponds to a CPU eDP port.
72 */
73static bool is_cpu_edp(struct intel_dp *intel_dp)
74{
75	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
76}
77
78static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
79{
80	return container_of(intel_attached_encoder(connector),
81			    struct intel_dp, base);
82}
83
84/**
85 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
86 * @encoder: DRM encoder
87 *
88 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
89 * by intel_display.c.
90 */
91bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
92{
93	struct intel_dp *intel_dp;
94
95	if (!encoder)
96		return false;
97
98	intel_dp = enc_to_intel_dp(encoder);
99
100	return is_pch_edp(intel_dp);
101}
102
103static void intel_dp_link_down(struct intel_dp *intel_dp);
104
105void
106intel_edp_link_config(struct intel_encoder *intel_encoder,
107		       int *lane_num, int *link_bw)
108{
109	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
110
111	*lane_num = intel_dp->lane_count;
112	if (intel_dp->link_bw == DP_LINK_BW_1_62)
113		*link_bw = 162000;
114	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
115		*link_bw = 270000;
116}
117
118int
119intel_edp_target_clock(struct intel_encoder *intel_encoder,
120		       struct drm_display_mode *mode)
121{
122	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
123	struct intel_connector *intel_connector = intel_dp->attached_connector;
124
125	if (intel_connector->panel.fixed_mode)
126		return intel_connector->panel.fixed_mode->clock;
127	else
128		return mode->clock;
129}
130
131static int
132intel_dp_max_lane_count(struct intel_dp *intel_dp)
133{
134	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
135	switch (max_lane_count) {
136	case 1: case 2: case 4:
137		break;
138	default:
139		max_lane_count = 4;
140	}
141	return max_lane_count;
142}
143
144static int
145intel_dp_max_link_bw(struct intel_dp *intel_dp)
146{
147	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
148
149	switch (max_link_bw) {
150	case DP_LINK_BW_1_62:
151	case DP_LINK_BW_2_7:
152		break;
153	default:
154		max_link_bw = DP_LINK_BW_1_62;
155		break;
156	}
157	return max_link_bw;
158}
159
160static int
161intel_dp_link_clock(uint8_t link_bw)
162{
163	if (link_bw == DP_LINK_BW_2_7)
164		return 270000;
165	else
166		return 162000;
167}
168
169/*
170 * The units on the numbers in the next two are... bizarre.  Examples will
171 * make it clearer; this one parallels an example in the eDP spec.
172 *
173 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174 *
175 *     270000 * 1 * 8 / 10 == 216000
176 *
177 * The actual data capacity of that configuration is 2.16Gbit/s, so the
178 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
179 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180 * 119000.  At 18bpp that's 2142000 kilobits per second.
181 *
182 * Thus the strange-looking division by 10 in intel_dp_link_required, to
183 * get the result in decakilobits instead of kilobits.
184 */
185
/*
 * Bandwidth needed by a mode, in decakilobits/s (see the units comment
 * above): kilobits rounded up to the next multiple of ten.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
191
/*
 * Usable data rate of a link, in decakilobits/s: 8b/10b coding means
 * only 8 of every 10 link bits carry payload.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return max_link_clock * max_lanes * 8 / 10;
}
197
198static bool
199intel_dp_adjust_dithering(struct intel_dp *intel_dp,
200			  struct drm_display_mode *mode,
201			  bool adjust_mode)
202{
203	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
204	int max_lanes = intel_dp_max_lane_count(intel_dp);
205	int max_rate, mode_rate;
206
207	mode_rate = intel_dp_link_required(mode->clock, 24);
208	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
209
210	if (mode_rate > max_rate) {
211		mode_rate = intel_dp_link_required(mode->clock, 18);
212		if (mode_rate > max_rate)
213			return false;
214
215		if (adjust_mode)
216			mode->private_flags
217				|= INTEL_MODE_DP_FORCE_6BPC;
218
219		return true;
220	}
221
222	return true;
223}
224
225static int
226intel_dp_mode_valid(struct drm_connector *connector,
227		    struct drm_display_mode *mode)
228{
229	struct intel_dp *intel_dp = intel_attached_dp(connector);
230	struct intel_connector *intel_connector = to_intel_connector(connector);
231	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
232
233	if (is_edp(intel_dp) && fixed_mode) {
234		if (mode->hdisplay > fixed_mode->hdisplay)
235			return MODE_PANEL;
236
237		if (mode->vdisplay > fixed_mode->vdisplay)
238			return MODE_PANEL;
239	}
240
241	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
242		return MODE_CLOCK_HIGH;
243
244	if (mode->clock < 10000)
245		return MODE_CLOCK_LOW;
246
247	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
248		return MODE_H_ILLEGAL;
249
250	return MODE_OK;
251}
252
/*
 * Pack up to four bytes into one 32-bit AUX data register value,
 * big-endian: src[0] ends up in the most significant byte.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << (24 - 8 * i);

	return v;
}
265
/*
 * Unpack one 32-bit AUX data register value into up to four bytes,
 * big-endian: the most significant byte becomes dst[0].
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - 8 * i));
}
275
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	/* Decode the FSB frequency field of CLKCFG into hrawclk (MHz). */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: default to the 533 FSB value. */
		return 133;
	}
}
309
310static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
311{
312	struct drm_device *dev = intel_dp->base.base.dev;
313	struct drm_i915_private *dev_priv = dev->dev_private;
314
315	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
316}
317
318static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
319{
320	struct drm_device *dev = intel_dp->base.base.dev;
321	struct drm_i915_private *dev_priv = dev->dev_private;
322
323	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
324}
325
326static void
327intel_dp_check_edp(struct intel_dp *intel_dp)
328{
329	struct drm_device *dev = intel_dp->base.base.dev;
330	struct drm_i915_private *dev_priv = dev->dev_private;
331
332	if (!is_edp(intel_dp))
333		return;
334	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
335		WARN(1, "eDP powered off while attempting aux channel communication.\n");
336		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
337			      I915_READ(PCH_PP_STATUS),
338			      I915_READ(PCH_PP_CONTROL));
339	}
340}
341
/*
 * intel_dp_aux_ch - perform one raw DP AUX channel transaction
 * @intel_dp: port to talk through
 * @send: request bytes (command/address header plus any payload)
 * @send_bytes: number of valid bytes in @send
 * @recv: buffer for the reply bytes
 * @recv_size: capacity of @recv
 *
 * Returns the number of reply bytes copied into @recv, or a negative
 * errno: -EBUSY (channel stuck busy or transaction never completed),
 * -EIO (receive error), -ETIMEDOUT (sink did not answer — typical for
 * a disconnected device).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Default (pre-HSW) layout: AUX regs live just after the port reg. */
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	/* Haswell has dedicated per-port AUX register blocks instead. */
	if (IS_HASWELL(dev)) {
		switch (intel_dp->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		/* Busy-wait for SEND_BUSY to clear; no upper bound here. */
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		/* Retry the whole transaction on timeout/receive error. */
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}
490
491/* Write data to the aux channel in native mode */
492static int
493intel_dp_aux_native_write(struct intel_dp *intel_dp,
494			  uint16_t address, uint8_t *send, int send_bytes)
495{
496	int ret;
497	uint8_t	msg[20];
498	int msg_bytes;
499	uint8_t	ack;
500
501	intel_dp_check_edp(intel_dp);
502	if (send_bytes > 16)
503		return -1;
504	msg[0] = AUX_NATIVE_WRITE << 4;
505	msg[1] = address >> 8;
506	msg[2] = address & 0xff;
507	msg[3] = send_bytes - 1;
508	memcpy(&msg[4], send, send_bytes);
509	msg_bytes = send_bytes + 4;
510	for (;;) {
511		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
512		if (ret < 0)
513			return ret;
514		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
515			break;
516		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
517			udelay(100);
518		else
519			return -EIO;
520	}
521	return send_bytes;
522}
523
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	uint8_t val = byte;

	return intel_dp_aux_native_write(intel_dp, address, &val, 1);
}
531
/* read bytes from a native aux channel
 *
 * Returns the number of data bytes copied into @recv, or a negative
 * errno (-EPROTO for an empty reply, or whatever intel_dp_aux_ch()
 * returned).  Retries indefinitely on DEFER replies.
 */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];	/* 1 status byte + data; NOTE(review): assumes
				 * recv_bytes <= 19 — verify callers. */
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Native read request header: command, 20-bit address, length-1. */
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		/* reply[0] carries the native reply code; data follows. */
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}
571
/*
 * intel_dp_i2c_aux_ch - single-byte I2C-over-AUX transfer callback
 * @adapter: the i2c adapter embedded in struct intel_dp
 * @mode: MODE_I2C_READ/WRITE plus optional MODE_I2C_STOP from the
 *	  i2c-over-AUX algorithm layer
 * @write_byte: byte to send on a write
 * @read_byte: out - byte received on a read
 *
 * Builds the I2C-over-AUX request, issues it via intel_dp_aux_ch() and
 * decodes the two-level reply: the native AUX reply field first, then
 * the I2C reply field.  Retries up to 5 times on either level's DEFER.
 * Returns the number of data bytes transferred or a negative errno.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) is set on everything but STOP. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	/* Size the request/reply for write, read, or address-only. */
	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		/* Native ACK received; now decode the i2c-level reply. */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}
668
669static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
670static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
671
672static int
673intel_dp_i2c_init(struct intel_dp *intel_dp,
674		  struct intel_connector *intel_connector, const char *name)
675{
676	int	ret;
677
678	DRM_DEBUG_KMS("i2c_init %s\n", name);
679	intel_dp->algo.running = false;
680	intel_dp->algo.address = 0;
681	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
682
683	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
684	intel_dp->adapter.owner = THIS_MODULE;
685	intel_dp->adapter.class = I2C_CLASS_DDC;
686	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
687	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
688	intel_dp->adapter.algo_data = &intel_dp->algo;
689	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
690
691	ironlake_edp_panel_vdd_on(intel_dp);
692	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
693	ironlake_edp_panel_vdd_off(intel_dp, false);
694	return ret;
695}
696
/*
 * intel_dp_mode_fixup - pick link bandwidth/lane count for a mode
 *
 * Fills intel_dp->link_bw and ->lane_count with the smallest link
 * configuration that can carry @adjusted_mode, and rewrites
 * adjusted_mode->clock to the chosen link clock.  For eDP the panel's
 * fixed mode overrides the requested mode first.  Returns false if the
 * mode cannot be carried at all (or is double-clocked).
 */
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	/* eDP: substitute the panel's fixed mode and set up panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* May tag the mode with INTEL_MODE_DP_FORCE_6BPC if 24bpp won't fit. */
	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Try the lower link rate with 1/2/4 lanes before the higher rate. */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}
752
/* M/N ratio values programmed into the pipe/transcoder data and link
 * registers; filled in by intel_dp_compute_m_n(). */
struct intel_dp_m_n {
	uint32_t	tu;	/* transfer unit size (always 64 here) */
	uint32_t	gmch_m;	/* data M: pixel payload bytes */
	uint32_t	gmch_n;	/* data N: link byte capacity */
	uint32_t	link_m;	/* link M: pixel clock */
	uint32_t	link_n;	/* link N: link clock */
};
760
/*
 * Shift both terms of the ratio right until each fits in 24 bits
 * (the register field width).  Lossy, but preserves the ratio closely.
 * NOTE(review): a small denominator can be shifted all the way to 0.
 */
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	for (;;) {
		if (*num <= 0xffffff && *den <= 0xffffff)
			break;
		*num >>= 1;
		*den >>= 1;
	}
}
769
/*
 * Compute the data (gmch) and link M/N ratios for a pipe driving DP.
 * Data M/N compares pixel payload bytes against total link byte rate;
 * link M/N compares pixel clock against link clock.  Both ratios are
 * reduced to fit the 24-bit register fields.
 */
static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	/* >> 3: bits per pixel to bytes per pixel */
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
785
/*
 * intel_dp_set_m_n - program the data/link M/N ratios for a DP pipe
 *
 * Looks up the DP/eDP encoder on @crtc to obtain the lane count
 * (falling back to 4 when none is found), computes the M/N values and
 * writes them to the generation-specific register set (HSW pipe regs,
 * PCH-split transcoder regs, VLV pipe regs, or legacy GMCH pipe regs).
 */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}
843
844void intel_dp_init_link_config(struct intel_dp *intel_dp)
845{
846	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
847	intel_dp->link_configuration[0] = intel_dp->link_bw;
848	intel_dp->link_configuration[1] = intel_dp->lane_count;
849	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
850	/*
851	 * Check for DPCD version > 1.1 and enhanced framing support
852	 */
853	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
854	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
855		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
856	}
857}
858
/*
 * intel_dp_mode_set - compose the DP port register value for this mode
 *
 * Builds intel_dp->DP from the chosen lane count, audio setting and the
 * register-layout-specific sync/training/PLL bits.  Only the cached
 * value is assembled here; nothing is written to the output register in
 * this function.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	/* Lane count chosen earlier in intel_dp_mode_fixup(). */
	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU eDP: CPT-style training bits plus CPU PLL frequency. */
	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		/* IBX PCH / CPU layout: sync, training and pipe select here. */
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		/* CPT PCH: most configuration lives in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
959
960#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
961#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
962
963#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
964#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
965
966#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
967#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
968
/*
 * ironlake_wait_panel_status - poll the panel power sequencer status
 * @intel_dp: eDP port
 * @mask: PCH_PP_STATUS bits to test
 * @value: expected value of the masked bits
 *
 * Polls via _wait_for(..., 5000, 10) until the masked status bits match
 * @value; on timeout only logs an error rather than failing.
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}
987
/* Wait for the sequencer to report panel on and idle. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
993
/* Wait for the sequencer to report panel off and idle. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
999
/* Wait until the mandated power-cycle delay has elapsed (panel off,
 * idle, and not in the cycle-delay phase). */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1005
1006
1007/* Read the current pp_control value, unlocking the register if it
1008 * is locked
1009 */
1010
1011static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
1012{
1013	u32	control = I915_READ(PCH_PP_CONTROL);
1014
1015	control &= ~PANEL_UNLOCK_MASK;
1016	control |= PANEL_UNLOCK_REGS;
1017	return control;
1018}
1019
/*
 * ironlake_edp_panel_vdd_on - force eDP panel VDD on for AUX access
 *
 * No-op for non-eDP ports.  Records that VDD is wanted, then sets
 * EDP_FORCE_VDD unless it is already on.  If the panel is not powered,
 * the required power-cycle delay is honoured first and the power-up
 * delay is slept afterwards so the sink is ready for AUX traffic.
 * Must be balanced by ironlake_edp_panel_vdd_off().
 */
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	/* Nested vdd_on requests indicate unbalanced callers. */
	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}
1058
/*
 * Actually drop the VDD force bit, but only if nobody has re-requested VDD
 * in the meantime and the hardware still has it asserted.  Sleeps for the
 * panel power down delay so the sequencer settles before further activity.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}
1078
1079static void ironlake_panel_vdd_work(struct work_struct *__work)
1080{
1081	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1082						 struct intel_dp, panel_vdd_work);
1083	struct drm_device *dev = intel_dp->base.base.dev;
1084
1085	mutex_lock(&dev->mode_config.mutex);
1086	ironlake_panel_vdd_off_sync(intel_dp);
1087	mutex_unlock(&dev->mode_config.mutex);
1088}
1089
/*
 * Release a previous VDD-on request.  With @sync the force bit is dropped
 * immediately (and the power-down delay is slept); otherwise the release is
 * deferred via delayed work so back-to-back AUX users keep VDD up.
 */
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}
1112
/*
 * Run the panel power sequencer to turn the eDP panel on, blocking until
 * the sequencer reports the panel powered up.  No-op for non-eDP outputs
 * or if the panel is already on.
 */
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	/* Respect the minimum off time before powering back up. */
	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}
1154
/*
 * Power the eDP panel off and wait for the sequencer to finish.  Callers
 * must hold a VDD reference (want_panel_vdd); this function consumes it,
 * since the FORCE_VDD bit is cleared as part of the power-down write.
 */
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* VDD was just dropped by the write above, so clear the bookkeeping. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}
1179
/* Enable the eDP backlight after the panel-specific on delay has elapsed. */
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}
1202
/* Disable the eDP backlight, then sleep the panel's backlight-off delay so
 * subsequent panel operations don't race the fading backlight. */
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}
1219
/*
 * Enable the CPU eDP PLL via DP_A.  Must be called with the pipe disabled
 * (asserted below) and before the port itself is enabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);	/* let the PLL spin up before anyone uses it */
}
1244
/*
 * Disable the CPU eDP PLL via DP_A.  Must be called with the pipe disabled
 * and after the port itself has been disabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);	/* let the PLL settle before it's reprogrammed */
}
1268
1269/* If the sink supports it, try to set the power state appropriately */
1270void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1271{
1272	int ret, i;
1273
1274	/* Should have a valid DPCD by this point */
1275	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1276		return;
1277
1278	if (mode != DRM_MODE_DPMS_ON) {
1279		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1280						  DP_SET_POWER_D3);
1281		if (ret != 1)
1282			DRM_DEBUG_DRIVER("failed to write sink power state\n");
1283	} else {
1284		/*
1285		 * When turning on, we need to retry for 1ms to give the sink
1286		 * time to wake up.
1287		 */
1288		for (i = 0; i < 3; i++) {
1289			ret = intel_dp_aux_native_write_1(intel_dp,
1290							  DP_SET_POWER,
1291							  DP_SET_POWER_D0);
1292			if (ret == 1)
1293				break;
1294			msleep(1);
1295		}
1296	}
1297}
1298
/*
 * Read back which pipe (if any) this DP port currently drives.  Returns
 * false when the port is disabled.  Returns true when enabled; *pipe is
 * filled in when the routing can be determined, and is left untouched in
 * the (warned-about) fallback paths below.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		/* gen7 CPU eDP encodes the pipe with the CPT-style bits */
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* On CPT the pipe<->port routing lives in the transcoder
		 * registers, so scan them for our port select value. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			/* unknown output register: port is on, pipe unknown */
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}
	}

	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);

	return true;
}
1346
/* Encoder disable hook: backlight and panel off, link down for PCH ports. */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	/* NOTE(review): the sink is deliberately set to D0 (on) here,
	 * presumably so the AUX channel stays usable during panel off —
	 * confirm before "fixing" this to D3. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is
	 * disabled; that case is handled in intel_post_disable_dp(). */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}
1362
1363static void intel_post_disable_dp(struct intel_encoder *encoder)
1364{
1365	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1366
1367	if (is_cpu_edp(intel_dp)) {
1368		intel_dp_link_down(intel_dp);
1369		ironlake_edp_pll_off(intel_dp);
1370	}
1371}
1372
/*
 * Encoder enable hook.  The ordering below is deliberate: VDD must be up
 * for the AUX traffic, training starts before full panel power, and the
 * backlight only comes on once the link is fully trained.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must still be disabled when we get here. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}
1391
1392static void intel_pre_enable_dp(struct intel_encoder *encoder)
1393{
1394	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1395
1396	if (is_cpu_edp(intel_dp))
1397		ironlake_edp_pll_on(intel_dp);
1398}
1399
1400/*
1401 * Native read with retry for link status and receiver capability reads for
1402 * cases where the sink may still be asleep.
1403 */
1404static bool
1405intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1406			       uint8_t *recv, int recv_bytes)
1407{
1408	int ret, i;
1409
1410	/*
1411	 * Sinks are *supposed* to come up within 1ms from an off state,
1412	 * but we're also supposed to retry 3 times per the spec.
1413	 */
1414	for (i = 0; i < 3; i++) {
1415		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1416					       recv_bytes);
1417		if (ret == recv_bytes)
1418			return true;
1419		msleep(1);
1420	}
1421
1422	return false;
1423}
1424
1425/*
1426 * Fetch AUX CH registers 0x202 - 0x207 which contain
1427 * link status information
1428 */
1429static bool
1430intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1431{
1432	return intel_dp_aux_native_read_retry(intel_dp,
1433					      DP_LANE0_1_STATUS,
1434					      link_status,
1435					      DP_LINK_STATUS_SIZE);
1436}
1437
1438#if 0
1439static char	*voltage_names[] = {
1440	"0.4V", "0.6V", "0.8V", "1.2V"
1441};
1442static char	*pre_emph_names[] = {
1443	"0dB", "3.5dB", "6dB", "9.5dB"
1444};
1445static char	*link_train_names[] = {
1446	"pattern 1", "pattern 2", "idle", "off"
1447};
1448#endif
1449
1450/*
1451 * These are source-specific values; current Intel hardware supports
1452 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1453 */
1454
1455static uint8_t
1456intel_dp_voltage_max(struct intel_dp *intel_dp)
1457{
1458	struct drm_device *dev = intel_dp->base.base.dev;
1459
1460	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1461		return DP_TRAIN_VOLTAGE_SWING_800;
1462	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1463		return DP_TRAIN_VOLTAGE_SWING_1200;
1464	else
1465		return DP_TRAIN_VOLTAGE_SWING_800;
1466}
1467
1468static uint8_t
1469intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1470{
1471	struct drm_device *dev = intel_dp->base.base.dev;
1472
1473	if (IS_HASWELL(dev)) {
1474		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1475		case DP_TRAIN_VOLTAGE_SWING_400:
1476			return DP_TRAIN_PRE_EMPHASIS_9_5;
1477		case DP_TRAIN_VOLTAGE_SWING_600:
1478			return DP_TRAIN_PRE_EMPHASIS_6;
1479		case DP_TRAIN_VOLTAGE_SWING_800:
1480			return DP_TRAIN_PRE_EMPHASIS_3_5;
1481		case DP_TRAIN_VOLTAGE_SWING_1200:
1482		default:
1483			return DP_TRAIN_PRE_EMPHASIS_0;
1484		}
1485	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1486		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1487		case DP_TRAIN_VOLTAGE_SWING_400:
1488			return DP_TRAIN_PRE_EMPHASIS_6;
1489		case DP_TRAIN_VOLTAGE_SWING_600:
1490		case DP_TRAIN_VOLTAGE_SWING_800:
1491			return DP_TRAIN_PRE_EMPHASIS_3_5;
1492		default:
1493			return DP_TRAIN_PRE_EMPHASIS_0;
1494		}
1495	} else {
1496		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1497		case DP_TRAIN_VOLTAGE_SWING_400:
1498			return DP_TRAIN_PRE_EMPHASIS_6;
1499		case DP_TRAIN_VOLTAGE_SWING_600:
1500			return DP_TRAIN_PRE_EMPHASIS_6;
1501		case DP_TRAIN_VOLTAGE_SWING_800:
1502			return DP_TRAIN_PRE_EMPHASIS_3_5;
1503		case DP_TRAIN_VOLTAGE_SWING_1200:
1504		default:
1505			return DP_TRAIN_PRE_EMPHASIS_0;
1506		}
1507	}
1508}
1509
1510static void
1511intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1512{
1513	uint8_t v = 0;
1514	uint8_t p = 0;
1515	int lane;
1516	uint8_t voltage_max;
1517	uint8_t preemph_max;
1518
1519	for (lane = 0; lane < intel_dp->lane_count; lane++) {
1520		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1521		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
1522
1523		if (this_v > v)
1524			v = this_v;
1525		if (this_p > p)
1526			p = this_p;
1527	}
1528
1529	voltage_max = intel_dp_voltage_max(intel_dp);
1530	if (v >= voltage_max)
1531		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1532
1533	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1534	if (p >= preemph_max)
1535		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1536
1537	for (lane = 0; lane < 4; lane++)
1538		intel_dp->train_set[lane] = v | p;
1539}
1540
1541static uint32_t
1542intel_dp_signal_levels(uint8_t train_set)
1543{
1544	uint32_t	signal_levels = 0;
1545
1546	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1547	case DP_TRAIN_VOLTAGE_SWING_400:
1548	default:
1549		signal_levels |= DP_VOLTAGE_0_4;
1550		break;
1551	case DP_TRAIN_VOLTAGE_SWING_600:
1552		signal_levels |= DP_VOLTAGE_0_6;
1553		break;
1554	case DP_TRAIN_VOLTAGE_SWING_800:
1555		signal_levels |= DP_VOLTAGE_0_8;
1556		break;
1557	case DP_TRAIN_VOLTAGE_SWING_1200:
1558		signal_levels |= DP_VOLTAGE_1_2;
1559		break;
1560	}
1561	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1562	case DP_TRAIN_PRE_EMPHASIS_0:
1563	default:
1564		signal_levels |= DP_PRE_EMPHASIS_0;
1565		break;
1566	case DP_TRAIN_PRE_EMPHASIS_3_5:
1567		signal_levels |= DP_PRE_EMPHASIS_3_5;
1568		break;
1569	case DP_TRAIN_PRE_EMPHASIS_6:
1570		signal_levels |= DP_PRE_EMPHASIS_6;
1571		break;
1572	case DP_TRAIN_PRE_EMPHASIS_9_5:
1573		signal_levels |= DP_PRE_EMPHASIS_9_5;
1574		break;
1575	}
1576	return signal_levels;
1577}
1578
/* Gen6's DP voltage swing and pre-emphasis control.
 * Maps a DPCD swing+pre-emphasis combination onto the SNB register
 * encoding; several DPCD combinations share one hardware setting. */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Fall back to the lowest drive strength for combinations
		 * the hardware has no encoding for. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
1606
/* Gen7's DP voltage swing and pre-emphasis control.
 * Maps a DPCD swing+pre-emphasis combination onto the IVB register
 * encoding. */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* 500mV/0dB is this platform's documented safe fallback. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
1637
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control.
 * Maps a DPCD swing+pre-emphasis combination onto the DDI buffer
 * emphasis encoding. */
static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		/* Fall back to the lowest drive strength for combinations
		 * the hardware has no encoding for. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
1671
/*
 * Program a DP training pattern (or disable training) on both the source
 * port register and the sink's DPCD, using the platform-appropriate
 * register protocol (HSW DDI, CPT, or the legacy DP port bits).  Also
 * pushes the current per-lane drive settings to the sink while training
 * is active.  Returns false if the lane-set DPCD write fails.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	uint32_t temp;

	if (IS_HASWELL(dev)) {
		/* HSW trains via DP_TP_CTL rather than the port register. */
		temp = I915_READ(DP_TP_CTL(intel_dp->port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			/* Send idle patterns and wait for them to complete
			 * before switching to normal pixel output. */
			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
			I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

			if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) &
				      DP_TP_STATUS_IDLE_DONE), 1))
				DRM_ERROR("Timed out waiting for DP idle patterns\n");

			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(intel_dp->port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		/* CPT-style training bits in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No pattern 3 on this hardware; degrade to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy training bits in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No pattern 3 on this hardware; degrade to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Mirror the pattern selection to the sink. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	/* While training, also push the per-lane drive settings. */
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}
1775
/* Enable corresponding port and start training pattern 1.
 *
 * Clock-recovery phase of DP link training: loop over training pattern 1,
 * adjusting voltage swing / pre-emphasis as requested by the sink, until
 * clock recovery succeeds or the retry budget (5 same-voltage tries x 5
 * full restarts) is exhausted.  The final port register value is cached
 * in intel_dp->DP for the channel-equalization phase.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &intel_dp->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (IS_HASWELL(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	/* Start from the minimum drive settings. */
	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
		uint32_t    signal_levels;

		/* Translate the DPCD drive settings into this platform's
		 * register encoding. */
		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(
							intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}
		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
			      signal_levels);

		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;
		/* Set training pattern 1 */

		udelay(100);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count && voltage_tries == 5) {
			/* All lanes maxed out and stuck: restart from the
			 * minimum settings, up to 5 full restarts. */
			if (++loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
			voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
			voltage_tries = 0;
		} else
			++voltage_tries;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}
1870
1871void
1872intel_dp_complete_link_train(struct intel_dp *intel_dp)
1873{
1874	struct drm_device *dev = intel_dp->base.base.dev;
1875	bool channel_eq = false;
1876	int tries, cr_tries;
1877	uint32_t DP = intel_dp->DP;
1878
1879	/* channel equalization */
1880	tries = 0;
1881	cr_tries = 0;
1882	channel_eq = false;
1883	for (;;) {
1884		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1885		uint32_t    signal_levels;
1886		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
1887
1888		if (cr_tries > 5) {
1889			DRM_ERROR("failed to train DP, aborting\n");
1890			intel_dp_link_down(intel_dp);
1891			break;
1892		}
1893
1894		if (IS_HASWELL(dev)) {
1895			signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
1896			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1897		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1898			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1899			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1900		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1901			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1902			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1903		} else {
1904			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1905			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1906		}
1907
1908		/* channel eq pattern */
1909		if (!intel_dp_set_link_train(intel_dp, DP,
1910					     DP_TRAINING_PATTERN_2 |
1911					     DP_LINK_SCRAMBLING_DISABLE))
1912			break;
1913
1914		udelay(400);
1915		if (!intel_dp_get_link_status(intel_dp, link_status))
1916			break;
1917
1918		/* Make sure clock is still ok */
1919		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1920			intel_dp_start_link_train(intel_dp);
1921			cr_tries++;
1922			continue;
1923		}
1924
1925		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
1926			channel_eq = true;
1927			break;
1928		}
1929
1930		/* Try 5 times, then try clock recovery if that fails */
1931		if (tries > 5) {
1932			intel_dp_link_down(intel_dp);
1933			intel_dp_start_link_train(intel_dp);
1934			tries = 0;
1935			cr_tries++;
1936			continue;
1937		}
1938
1939		/* Compute new intel_dp->train_set as requested by target */
1940		intel_get_adjust_train(intel_dp, link_status);
1941		++tries;
1942	}
1943
1944	if (channel_eq)
1945		DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
1946
1947	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
1948}
1949
/*
 * Tear down the DP link: put the port into the idle training state, then
 * disable it, applying the IBX transcoder-B hardware workaround on the
 * way.  Deliberately does not touch intel_dp->DP (see pll_off) so that a
 * later retrain can reuse the cached value.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (IS_HASWELL(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop into the idle training pattern for this platform. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	msleep(17);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	/* Finally disable audio and the port itself. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2029
2030static bool
2031intel_dp_get_dpcd(struct intel_dp *intel_dp)
2032{
2033	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2034					   sizeof(intel_dp->dpcd)) == 0)
2035		return false; /* aux transfer failed */
2036
2037	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2038		return false; /* DPCD not present */
2039
2040	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2041	      DP_DWN_STRM_PORT_PRESENT))
2042		return true; /* native DP sink */
2043
2044	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2045		return true; /* no per-port downstream info */
2046
2047	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2048					   intel_dp->downstream_ports,
2049					   DP_MAX_DOWNSTREAM_PORTS) == 0)
2050		return false; /* downstream port status fetch failed */
2051
2052	return true;
2053}
2054
2055static void
2056intel_dp_probe_oui(struct intel_dp *intel_dp)
2057{
2058	u8 buf[3];
2059
2060	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2061		return;
2062
2063	ironlake_edp_panel_vdd_on(intel_dp);
2064
2065	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2066		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2067			      buf[0], buf[1], buf[2]);
2068
2069	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2070		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2071			      buf[0], buf[1], buf[2]);
2072
2073	ironlake_edp_panel_vdd_off(intel_dp, false);
2074}
2075
2076static bool
2077intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2078{
2079	int ret;
2080
2081	ret = intel_dp_aux_native_read_retry(intel_dp,
2082					     DP_DEVICE_SERVICE_IRQ_VECTOR,
2083					     sink_irq_vector, 1);
2084	if (!ret)
2085		return false;
2086
2087	return true;
2088}
2089
2090static void
2091intel_dp_handle_test_request(struct intel_dp *intel_dp)
2092{
2093	/* NAK by default */
2094	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
2095}
2096
2097/*
2098 * According to DP spec
2099 * 5.1.2:
2100 *  1. Read DPCD
2101 *  2. Configure link according to Receiver Capabilities
2102 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
2103 *  4. Check link status on receipt of hot-plug interrupt
2104 */
2105
/*
 * Re-validate an active link (typically after a hotplug interrupt):
 * service any pending sink IRQs and retrain the link when channel
 * equalization has been lost.  Takes the link down when the sink stops
 * answering.  No-op unless the encoder has active connectors and a crtc.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->base.connectors_active)
		return;

	if (WARN_ON(!intel_dp->base.base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain when the sink reports the channel is no longer equalized. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}
2151
2152/* XXX this is probably wrong for multiple downstream ports */
2153static enum drm_connector_status
2154intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2155{
2156	uint8_t *dpcd = intel_dp->dpcd;
2157	bool hpd;
2158	uint8_t type;
2159
2160	if (!intel_dp_get_dpcd(intel_dp))
2161		return connector_status_disconnected;
2162
2163	/* if there's no downstream port, we're done */
2164	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2165		return connector_status_connected;
2166
2167	/* If we're HPD-aware, SINK_COUNT changes dynamically */
2168	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2169	if (hpd) {
2170		uint8_t reg;
2171		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2172						    &reg, 1))
2173			return connector_status_unknown;
2174		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2175					      : connector_status_disconnected;
2176	}
2177
2178	/* If no HPD, poke DDC gently */
2179	if (drm_probe_ddc(&intel_dp->adapter))
2180		return connector_status_connected;
2181
2182	/* Well we tried, say unknown for unreliable port types */
2183	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2184	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2185		return connector_status_unknown;
2186
2187	/* Anything else is out of spec, warn and ignore */
2188	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2189	return connector_status_disconnected;
2190}
2191
2192static enum drm_connector_status
2193ironlake_dp_detect(struct intel_dp *intel_dp)
2194{
2195	enum drm_connector_status status;
2196
2197	/* Can't disconnect eDP, but you can close the lid... */
2198	if (is_edp(intel_dp)) {
2199		status = intel_panel_detect(intel_dp->base.base.dev);
2200		if (status == connector_status_unknown)
2201			status = connector_status_connected;
2202		return status;
2203	}
2204
2205	return intel_dp_detect_dpcd(intel_dp);
2206}
2207
2208static enum drm_connector_status
2209g4x_dp_detect(struct intel_dp *intel_dp)
2210{
2211	struct drm_device *dev = intel_dp->base.base.dev;
2212	struct drm_i915_private *dev_priv = dev->dev_private;
2213	uint32_t bit;
2214
2215	switch (intel_dp->output_reg) {
2216	case DP_B:
2217		bit = DPB_HOTPLUG_LIVE_STATUS;
2218		break;
2219	case DP_C:
2220		bit = DPC_HOTPLUG_LIVE_STATUS;
2221		break;
2222	case DP_D:
2223		bit = DPD_HOTPLUG_LIVE_STATUS;
2224		break;
2225	default:
2226		return connector_status_unknown;
2227	}
2228
2229	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2230		return connector_status_disconnected;
2231
2232	return intel_dp_detect_dpcd(intel_dp);
2233}
2234
2235static struct edid *
2236intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2237{
2238	struct intel_connector *intel_connector = to_intel_connector(connector);
2239
2240	/* use cached edid if we have one */
2241	if (intel_connector->edid) {
2242		struct edid *edid;
2243		int size;
2244
2245		/* invalid edid */
2246		if (IS_ERR(intel_connector->edid))
2247			return NULL;
2248
2249		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2250		edid = kmalloc(size, GFP_KERNEL);
2251		if (!edid)
2252			return NULL;
2253
2254		memcpy(edid, intel_connector->edid, size);
2255		return edid;
2256	}
2257
2258	return drm_get_edid(connector, adapter);
2259}
2260
2261static int
2262intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2263{
2264	struct intel_connector *intel_connector = to_intel_connector(connector);
2265
2266	/* use cached edid if we have one */
2267	if (intel_connector->edid) {
2268		/* invalid edid */
2269		if (IS_ERR(intel_connector->edid))
2270			return 0;
2271
2272		return intel_connector_update_modes(connector,
2273						    intel_connector->edid);
2274	}
2275
2276	return intel_ddc_get_modes(connector, adapter);
2277}
2278
2279
2280/**
2281 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
2282 *
2283 * \return true if DP port is connected.
2284 * \return false if DP port is disconnected.
2285 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	/* Platform-specific presence check before touching AUX. */
	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* Audio: honour the user's force override, else sniff the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	return connector_status_connected;
}
2323
2324static int intel_dp_get_modes(struct drm_connector *connector)
2325{
2326	struct intel_dp *intel_dp = intel_attached_dp(connector);
2327	struct intel_connector *intel_connector = to_intel_connector(connector);
2328	struct drm_device *dev = intel_dp->base.base.dev;
2329	int ret;
2330
2331	/* We should parse the EDID data and find out if it has an audio sink
2332	 */
2333
2334	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
2335	if (ret)
2336		return ret;
2337
2338	/* if eDP has no EDID, fall back to fixed mode */
2339	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2340		struct drm_display_mode *mode;
2341		mode = drm_mode_duplicate(dev,
2342					  intel_connector->panel.fixed_mode);
2343		if (mode) {
2344			drm_mode_probed_add(connector, mode);
2345			return 1;
2346		}
2347	}
2348	return 0;
2349}
2350
2351static bool
2352intel_dp_detect_audio(struct drm_connector *connector)
2353{
2354	struct intel_dp *intel_dp = intel_attached_dp(connector);
2355	struct edid *edid;
2356	bool has_audio = false;
2357
2358	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2359	if (edid) {
2360		has_audio = drm_detect_monitor_audio(edid);
2361		kfree(edid);
2362	}
2363
2364	return has_audio;
2365}
2366
/*
 * Connector .set_property callback.  Handles the two driver-private
 * properties ("audio" force override and "Broadcast RGB" color range)
 * and triggers a modeset when a change actually takes effect.
 *
 * Returns 0 on success, -EINVAL for an unrecognised property.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No change requested; nothing to do. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-probes the EDID; ON/OFF force the state. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Skip the modeset if the effective state didn't change. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_dp->color_range)
			return 0;

		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new state with a full modeset if the pipe is lit up. */
	if (intel_dp->base.base.crtc) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
		intel_set_mode(crtc, &crtc->mode,
			       crtc->x, crtc->y, crtc->fb);
	}

	return 0;
}
2420
/*
 * Connector .destroy callback: free the cached EDID, tear down eDP
 * panel and backlight state, then unregister and free the connector.
 */
static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* The EDID cache may hold an ERR_PTR sentinel; don't kfree that. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	if (is_edp(intel_dp)) {
		intel_panel_destroy_backlight(dev);
		intel_panel_fini(&intel_connector->panel);
	}

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2440
/*
 * Encoder .destroy callback: remove the DDC/AUX i2c adapter and, for
 * eDP, flush the delayed VDD-off work and force VDD off before the
 * encoder memory is freed.
 */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* Ensure pending VDD work cannot run after the free below. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		ironlake_panel_vdd_off_sync(intel_dp);
	}
	kfree(intel_dp);
}
2453
/* Encoder helpers for pre-Haswell parts (native DP mode_set). */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
	.disable = intel_encoder_noop,
};

/* Encoder helpers for Haswell, where DP is driven through the DDI. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs_hsw = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_ddi_mode_set,
	.disable = intel_encoder_noop,
};

/* Connector entry points shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

/* Mode probing helpers for the DP connector. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* Encoder vtable; only teardown is needed here. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
2483
2484static void
2485intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2486{
2487	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
2488
2489	intel_dp_check_link_status(intel_dp);
2490}
2491
2492/* Return which DP Port should be selected for Transcoder DP control */
2493int
2494intel_trans_dp_port_sel(struct drm_crtc *crtc)
2495{
2496	struct drm_device *dev = crtc->dev;
2497	struct intel_encoder *encoder;
2498
2499	for_each_encoder_on_crtc(dev, crtc, encoder) {
2500		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2501
2502		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2503		    intel_dp->base.type == INTEL_OUTPUT_EDP)
2504			return intel_dp->output_reg;
2505	}
2506
2507	return -1;
2508}
2509
2510/* check the VBT to see whether the eDP is on DP-D port */
2511bool intel_dpd_is_edp(struct drm_device *dev)
2512{
2513	struct drm_i915_private *dev_priv = dev->dev_private;
2514	struct child_device_config *p_child;
2515	int i;
2516
2517	if (!dev_priv->child_dev_num)
2518		return false;
2519
2520	for (i = 0; i < dev_priv->child_dev_num; i++) {
2521		p_child = dev_priv->child_dev + i;
2522
2523		if (p_child->dvo_port == PORT_IDPD &&
2524		    p_child->device_type == DEVICE_TYPE_eDP)
2525			return true;
2526	}
2527	return false;
2528}
2529
/* Attach the user-visible "audio" and "Broadcast RGB" connector
 * properties; changes are handled in intel_dp_set_property(). */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
}
2536
/*
 * Register a DP (or eDP) encoder + connector pair for @output_reg/@port.
 *
 * Allocates the intel_dp and intel_connector, classifies the port as
 * DP vs eDP, wires up the DRM encoder/connector vtables, sets up the
 * DDC/AUX i2c adapter, and for eDP additionally reads back the panel
 * power sequencer delays, the DPCD and the fixed panel mode.  On fatal
 * probe failures (unprogrammed power sequencer, no DPCD) the partially
 * constructed encoder and connector are torn down again.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_dp *intel_dp;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	struct drm_display_mode *fixed_mode = NULL;
	const char *name = NULL;
	int type;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
	if (!intel_dp)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->port = port;
	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}
	intel_encoder = &intel_dp->base;
	intel_dp->attached_connector = intel_connector;

	/* PCH DP-D may carry an eDP panel according to the VBT. */
	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && output_reg == DP_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (output_reg == DP_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	}

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_encoder->cloneable = false;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	/* Haswell drives DP through the DDI; use the DDI helper set there. */
	if (IS_HASWELL(dev))
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs_hsw);
	else
		drm_encoder_helper_add(&intel_encoder->base,
				       &intel_dp_helper_funcs);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (IS_HASWELL(dev)) {
		intel_encoder->enable = intel_enable_ddi;
		intel_encoder->pre_enable = intel_ddi_pre_enable;
		intel_encoder->disable = intel_disable_ddi;
		intel_encoder->post_disable = intel_ddi_post_disable;
		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
	} else {
		intel_encoder->enable = intel_enable_dp;
		intel_encoder->pre_enable = intel_pre_enable_dp;
		intel_encoder->disable = intel_disable_dp;
		intel_encoder->post_disable = intel_post_disable_dp;
		intel_encoder->get_hw_state = intel_dp_get_hw_state;
	}
	intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	/* Cache some DPCD data in the eDP case */
	if (is_edp(intel_dp)) {
		struct edp_power_seq	cur, vbt;
		u32 pp_on, pp_off, pp_div;

		pp_on = I915_READ(PCH_PP_ON_DELAYS);
		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
		pp_div = I915_READ(PCH_PP_DIVISOR);

		/* All-zero delay registers suggest the power sequencer was
		 * never programmed; the panel can't be driven safely. */
		if (!pp_on || !pp_off || !pp_div) {
			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Pull timing values out of registers */
		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
			PANEL_POWER_UP_DELAY_SHIFT;

		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
			PANEL_LIGHT_ON_DELAY_SHIFT;

		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
			PANEL_LIGHT_OFF_DELAY_SHIFT;

		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
			PANEL_POWER_DOWN_DELAY_SHIFT;

		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

		vbt = dev_priv->edp.pps;

		DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
			      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

		/* Pick the larger of the hw and VBT values for each delay;
		 * the divide-by-10 converts to the driver's delay units,
		 * rounding up. */
#define get_delay(field)	((max(cur.field, vbt.field) + 9) / 10)

		intel_dp->panel_power_up_delay = get_delay(t1_t3);
		intel_dp->backlight_on_delay = get_delay(t8);
		intel_dp->backlight_off_delay = get_delay(t9);
		intel_dp->panel_power_down_delay = get_delay(t10);
		intel_dp->panel_power_cycle_delay = get_delay(t11_t12);

		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
			      intel_dp->panel_power_cycle_delay);

		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
	}

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		/* Probe the DPCD with panel VDD forced on; failure means
		 * the VBT advertised a panel that isn't really there. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}

		/* Cache the panel EDID (or an ERR_PTR marker on failure) so
		 * later detect/get_modes calls can skip the DDC bus. */
		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
2782