/* amd-xgbe-phy.c revision 5c10e5cb0fbdde6cc79ca406b8bdcb05aa0c9489 */
1/*
2 * AMD 10Gb Ethernet PHY driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23 *
24 *
25 * License 2: Modified BSD
26 *
27 * Copyright (c) 2014 Advanced Micro Devices, Inc.
28 * All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
32 *     * Redistributions of source code must retain the above copyright
33 *       notice, this list of conditions and the following disclaimer.
34 *     * Redistributions in binary form must reproduce the above copyright
35 *       notice, this list of conditions and the following disclaimer in the
36 *       documentation and/or other materials provided with the distribution.
37 *     * Neither the name of Advanced Micro Devices, Inc. nor the
38 *       names of its contributors may be used to endorse or promote products
39 *       derived from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
42 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
52
53#include <linux/kernel.h>
54#include <linux/device.h>
55#include <linux/platform_device.h>
56#include <linux/string.h>
57#include <linux/errno.h>
58#include <linux/unistd.h>
59#include <linux/slab.h>
60#include <linux/interrupt.h>
61#include <linux/init.h>
62#include <linux/delay.h>
63#include <linux/netdevice.h>
64#include <linux/etherdevice.h>
65#include <linux/skbuff.h>
66#include <linux/mm.h>
67#include <linux/module.h>
68#include <linux/mii.h>
69#include <linux/ethtool.h>
70#include <linux/phy.h>
71#include <linux/mdio.h>
72#include <linux/io.h>
73#include <linux/of.h>
74#include <linux/of_platform.h>
75#include <linux/of_device.h>
76#include <linux/uaccess.h>
77
78
79MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
80MODULE_LICENSE("Dual BSD/GPL");
81MODULE_VERSION("1.0.0-a");
82MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
83
84#define XGBE_PHY_ID	0x000162d0
85#define XGBE_PHY_MASK	0xfffffff0
86
87#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"
88
89#define XGBE_AN_INT_CMPLT		0x01
90#define XGBE_AN_INC_LINK		0x02
91#define XGBE_AN_PG_RCV			0x04
92
93#define XNP_MCF_NULL_MESSAGE		0x001
94#define XNP_ACK_PROCESSED		(1 << 12)
95#define XNP_MP_FORMATTED		(1 << 13)
96#define XNP_NP_EXCHANGE			(1 << 15)
97
98#define XGBE_PHY_RATECHANGE_COUNT	100
99
100#ifndef MDIO_PMA_10GBR_PMD_CTRL
101#define MDIO_PMA_10GBR_PMD_CTRL		0x0096
102#endif
103#ifndef MDIO_PMA_10GBR_FEC_CTRL
104#define MDIO_PMA_10GBR_FEC_CTRL		0x00ab
105#endif
106#ifndef MDIO_AN_XNP
107#define MDIO_AN_XNP			0x0016
108#endif
109
110#ifndef MDIO_AN_INTMASK
111#define MDIO_AN_INTMASK			0x8001
112#endif
113#ifndef MDIO_AN_INT
114#define MDIO_AN_INT			0x8002
115#endif
116
117#ifndef MDIO_CTRL1_SPEED1G
118#define MDIO_CTRL1_SPEED1G		(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
119#endif
120
121/* SerDes integration register offsets */
122#define SIR0_KR_RT_1			0x002c
123#define SIR0_STATUS			0x0040
124#define SIR1_SPEED			0x0000
125
126/* SerDes integration register entry bit positions and sizes */
127#define SIR0_KR_RT_1_RESET_INDEX	11
128#define SIR0_KR_RT_1_RESET_WIDTH	1
129#define SIR0_STATUS_RX_READY_INDEX	0
130#define SIR0_STATUS_RX_READY_WIDTH	1
131#define SIR0_STATUS_TX_READY_INDEX	8
132#define SIR0_STATUS_TX_READY_WIDTH	1
133#define SIR1_SPEED_DATARATE_INDEX	4
134#define SIR1_SPEED_DATARATE_WIDTH	2
135#define SIR1_SPEED_PI_SPD_SEL_INDEX	12
136#define SIR1_SPEED_PI_SPD_SEL_WIDTH	4
137#define SIR1_SPEED_PLLSEL_INDEX		3
138#define SIR1_SPEED_PLLSEL_WIDTH		1
139#define SIR1_SPEED_RATECHANGE_INDEX	6
140#define SIR1_SPEED_RATECHANGE_WIDTH	1
141#define SIR1_SPEED_TXAMP_INDEX		8
142#define SIR1_SPEED_TXAMP_WIDTH		4
143#define SIR1_SPEED_WORDMODE_INDEX	0
144#define SIR1_SPEED_WORDMODE_WIDTH	3
145
146#define SPEED_10000_CDR			0x7
147#define SPEED_10000_PLL			0x1
148#define SPEED_10000_RATE		0x0
149#define SPEED_10000_TXAMP		0xa
150#define SPEED_10000_WORD		0x7
151
152#define SPEED_2500_CDR			0x2
153#define SPEED_2500_PLL			0x0
154#define SPEED_2500_RATE			0x1
155#define SPEED_2500_TXAMP		0xf
156#define SPEED_2500_WORD			0x1
157
158#define SPEED_1000_CDR			0x2
159#define SPEED_1000_PLL			0x0
160#define SPEED_1000_RATE			0x3
161#define SPEED_1000_TXAMP		0xf
162#define SPEED_1000_WORD			0x1
163
164
165/* SerDes RxTx register offsets */
166#define RXTX_REG20			0x0050
167#define RXTX_REG114			0x01c8
168
169/* SerDes RxTx register entry bit positions and sizes */
170#define RXTX_REG20_BLWC_ENA_INDEX	2
171#define RXTX_REG20_BLWC_ENA_WIDTH	1
172#define RXTX_REG114_PQ_REG_INDEX	9
173#define RXTX_REG114_PQ_REG_WIDTH	7
174
175#define RXTX_10000_BLWC			0
176#define RXTX_10000_PQ			0x1e
177
178#define RXTX_2500_BLWC			1
179#define RXTX_2500_PQ			0xa
180
181#define RXTX_1000_BLWC			1
182#define RXTX_1000_PQ			0xa
183
184/* Bit setting and getting macros
185 *  The get macro will extract the current bit field value from within
186 *  the variable
187 *
188 *  The set macro will clear the current bit field value within the
189 *  variable and then set the bit field of the variable to the
190 *  specified value
191 */
192#define GET_BITS(_var, _index, _width)					\
193	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))
194
195#define SET_BITS(_var, _index, _width, _val)				\
196do {									\
197	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
198	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
199} while (0)
200
201#define XSIR_GET_BITS(_var, _prefix, _field)				\
202	GET_BITS((_var),						\
203		 _prefix##_##_field##_INDEX,				\
204		 _prefix##_##_field##_WIDTH)
205
206#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
207	SET_BITS((_var),						\
208		 _prefix##_##_field##_INDEX,				\
209		 _prefix##_##_field##_WIDTH, (_val))
210
211/* Macros for reading or writing SerDes integration registers
212 *  The ioread macros will get bit fields or full values using the
213 *  register definitions formed using the input names
214 *
215 *  The iowrite macros will set bit fields or full values using the
216 *  register definitions formed using the input names
217 */
218#define XSIR0_IOREAD(_priv, _reg)					\
219	ioread16((_priv)->sir0_regs + _reg)
220
221#define XSIR0_IOREAD_BITS(_priv, _reg, _field)				\
222	GET_BITS(XSIR0_IOREAD((_priv), _reg),				\
223		 _reg##_##_field##_INDEX,				\
224		 _reg##_##_field##_WIDTH)
225
226#define XSIR0_IOWRITE(_priv, _reg, _val)				\
227	iowrite16((_val), (_priv)->sir0_regs + _reg)
228
229#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)			\
230do {									\
231	u16 reg_val = XSIR0_IOREAD((_priv), _reg);			\
232	SET_BITS(reg_val,						\
233		 _reg##_##_field##_INDEX,				\
234		 _reg##_##_field##_WIDTH, (_val));			\
235	XSIR0_IOWRITE((_priv), _reg, reg_val);				\
236} while (0)
237
238#define XSIR1_IOREAD(_priv, _reg)					\
239	ioread16((_priv)->sir1_regs + _reg)
240
241#define XSIR1_IOREAD_BITS(_priv, _reg, _field)				\
242	GET_BITS(XSIR1_IOREAD((_priv), _reg),				\
243		 _reg##_##_field##_INDEX,				\
244		 _reg##_##_field##_WIDTH)
245
246#define XSIR1_IOWRITE(_priv, _reg, _val)				\
247	iowrite16((_val), (_priv)->sir1_regs + _reg)
248
249#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)			\
250do {									\
251	u16 reg_val = XSIR1_IOREAD((_priv), _reg);			\
252	SET_BITS(reg_val,						\
253		 _reg##_##_field##_INDEX,				\
254		 _reg##_##_field##_WIDTH, (_val));			\
255	XSIR1_IOWRITE((_priv), _reg, reg_val);				\
256} while (0)
257
258
259/* Macros for reading or writing SerDes RxTx registers
260 *  The ioread macros will get bit fields or full values using the
261 *  register definitions formed using the input names
262 *
263 *  The iowrite macros will set bit fields or full values using the
264 *  register definitions formed using the input names
265 */
266#define XRXTX_IOREAD(_priv, _reg)					\
267	ioread16((_priv)->rxtx_regs + _reg)
268
269#define XRXTX_IOREAD_BITS(_priv, _reg, _field)				\
270	GET_BITS(XRXTX_IOREAD((_priv), _reg),				\
271		 _reg##_##_field##_INDEX,				\
272		 _reg##_##_field##_WIDTH)
273
274#define XRXTX_IOWRITE(_priv, _reg, _val)				\
275	iowrite16((_val), (_priv)->rxtx_regs + _reg)
276
277#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)			\
278do {									\
279	u16 reg_val = XRXTX_IOREAD((_priv), _reg);			\
280	SET_BITS(reg_val,						\
281		 _reg##_##_field##_INDEX,				\
282		 _reg##_##_field##_WIDTH, (_val));			\
283	XRXTX_IOWRITE((_priv), _reg, reg_val);				\
284} while (0)
285
286
/* States of the auto-negotiation state machine (see
 * amd_xgbe_an_state_machine()).
 */
enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,		/* state machine idle */
	AMD_XGBE_AN_START,		/* (re)start auto-negotiation */
	AMD_XGBE_AN_EVENT,		/* waiting on AN interrupt status */
	AMD_XGBE_AN_PAGE_RECEIVED,	/* a page was received from partner */
	AMD_XGBE_AN_INCOMPAT_LINK,	/* partner incompatible, switch mode */
	AMD_XGBE_AN_COMPLETE,		/* auto-negotiation finished */
	AMD_XGBE_AN_NO_LINK,		/* gave up without a usable link */
	AMD_XGBE_AN_EXIT,		/* request to leave the state machine */
	AMD_XGBE_AN_ERROR,		/* unrecoverable error occurred */
};
298
/* Per-mode page-exchange progress tracked during auto-negotiation */
enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_READY = 0,		/* no page exchange in progress */
	AMD_XGBE_RX_BPA,		/* expecting base page ability */
	AMD_XGBE_RX_XNP,		/* expecting extended next page */
	AMD_XGBE_RX_COMPLETE,		/* page exchange finished */
};
305
/* Current PHY operating mode */
enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,		/* 10Gbps (KR) operation */
	AMD_XGBE_MODE_KX,		/* 1Gbps or 2.5Gbps (KX) operation */
};
310
/* Supported speed pairs; presumably selected by the "amd,speed-set"
 * device-tree property (probe code not in view -- confirm).
 */
enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000,	/* 1Gbps and 10Gbps */
	AMD_XGBE_PHY_SPEEDSET_2500_10000,	/* 2.5Gbps and 10Gbps */
};
315
/* Driver private data, reached through phydev->priv */
struct amd_xgbe_phy_priv {
	struct platform_device *pdev;
	struct device *dev;

	struct phy_device *phydev;

	/* SerDes related mmio resources */
	struct resource *rxtx_res;
	struct resource *sir0_res;
	struct resource *sir1_res;

	/* SerDes related mmio registers */
	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */

	/* Maintain link status for re-starting auto-negotiation */
	unsigned int link;		/* last link state reported */
	enum amd_xgbe_phy_mode mode;	/* current KR/KX operating mode */
	unsigned int speed_set;		/* enum amd_xgbe_phy_speedset value */

	/* Auto-negotiation state machine support */
	struct mutex an_mutex;		/* guards the an_*/kr_*/kx_* fields */
	enum amd_xgbe_phy_an an_result;	/* final state of the last AN run */
	enum amd_xgbe_phy_an an_state;	/* current state machine state */
	enum amd_xgbe_phy_rx kr_state;	/* KR-mode page exchange progress */
	enum amd_xgbe_phy_rx kx_state;	/* KX-mode page exchange progress */
	struct work_struct an_work;	/* runs amd_xgbe_an_state_machine() */
	struct workqueue_struct *an_workqueue;
};
346
347static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
348{
349	int ret;
350
351	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
352	if (ret < 0)
353		return ret;
354
355	ret |= 0x02;
356	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
357
358	return 0;
359}
360
361static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
362{
363	int ret;
364
365	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
366	if (ret < 0)
367		return ret;
368
369	ret &= ~0x02;
370	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
371
372	return 0;
373}
374
375static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
376{
377	int ret;
378
379	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
380	if (ret < 0)
381		return ret;
382
383	ret |= MDIO_CTRL1_LPOWER;
384	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
385
386	usleep_range(75, 100);
387
388	ret &= ~MDIO_CTRL1_LPOWER;
389	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
390
391	return 0;
392}
393
/* Assert the SerDes Rx/Tx rate-change request.  The new rate settings
 * are programmed while this is asserted; it is released by
 * amd_xgbe_phy_serdes_complete_ratechange().
 */
static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	/* Assert Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
}
401
402static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
403{
404	struct amd_xgbe_phy_priv *priv = phydev->priv;
405	unsigned int wait;
406	u16 status;
407
408	/* Release Rx and Tx ratechange */
409	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
410
411	/* Wait for Rx and Tx ready */
412	wait = XGBE_PHY_RATECHANGE_COUNT;
413	while (wait--) {
414		usleep_range(10, 20);
415
416		status = XSIR0_IOREAD(priv, SIR0_STATUS);
417		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
418		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
419			return;
420	}
421
422	netdev_err(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
423		   status);
424}
425
/* Configure the PHY for 10Gbps (KR) operation.
 * Sequence: enable KR training, select the KR PCS type and 10G speed,
 * power cycle the PCS, then reprogram the SerDes with the 10G tuning
 * values inside a rate-change window.
 */
static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Enable KR training */
	ret = amd_xgbe_an_enable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KR/10G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBR;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED10G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	/* Power cycle the PCS so the new type/speed takes effect */
	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 10G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);

	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	priv->mode = AMD_XGBE_MODE_KR;

	return 0;
}
475
/* Configure the PHY for 2.5Gbps (KX) operation.
 * Same sequence as amd_xgbe_phy_xgmii_mode() but with KR training
 * disabled, the KX PCS type/1G speed selected, and the 2.5G SerDes
 * tuning values.
 */
static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	/* Power cycle the PCS so the new type/speed takes effect */
	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 2.5G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);

	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	priv->mode = AMD_XGBE_MODE_KX;

	return 0;
}
525
/* Configure the PHY for 1Gbps (KX) operation.
 * Same sequence as amd_xgbe_phy_gmii_2500_mode() but with the 1G
 * SerDes tuning values.
 */
static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	/* Power cycle the PCS so the new type/speed takes effect */
	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 1G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);

	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	priv->mode = AMD_XGBE_MODE_KX;

	return 0;
}
575
576static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
577{
578	struct amd_xgbe_phy_priv *priv = phydev->priv;
579	int ret;
580
581	/* If we are in KR switch to KX, and vice-versa */
582	if (priv->mode == AMD_XGBE_MODE_KR) {
583		if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
584			ret = amd_xgbe_phy_gmii_mode(phydev);
585		else
586			ret = amd_xgbe_phy_gmii_2500_mode(phydev);
587	} else {
588		ret = amd_xgbe_phy_xgmii_mode(phydev);
589	}
590
591	return ret;
592}
593
594static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
595{
596	int ret;
597
598	ret = amd_xgbe_phy_switch_mode(phydev);
599	if (ret < 0)
600		return AMD_XGBE_AN_ERROR;
601
602	return AMD_XGBE_AN_START;
603}
604
/* Finish page exchange and, in KR mode, configure FEC and start KR
 * link training.  Returns the next auto-negotiation state.
 */
static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
						    enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ad_reg, lp_reg, ret;

	/* Page exchange for this mode is finished */
	*state = AMD_XGBE_RX_COMPLETE;

	/* If we're in KX mode then we're done */
	if (priv->mode == AMD_XGBE_MODE_KX)
		return AMD_XGBE_AN_EVENT;

	/* Enable/Disable FEC */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* 0xc000 = FEC ability/requested bits of advertisement register 3
	 * (matches the bits set from SUPPORTED_10000baseR_FEC in
	 * amd_xgbe_an_start()); enable FEC only if both sides offered it
	 */
	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
		ret |= 0x01;
	else
		ret &= ~0x01;

	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);

	/* Start KR training */
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* Pulse the SerDes KR training reset around starting training */
	XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);

	/* 0x01 = training start/restart bit of the PMD control register */
	ret |= 0x01;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);

	return AMD_XGBE_AN_EVENT;
}
651
652static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
653					       enum amd_xgbe_phy_rx *state)
654{
655	u16 msg;
656
657	*state = AMD_XGBE_RX_XNP;
658
659	msg = XNP_MCF_NULL_MESSAGE;
660	msg |= XNP_MP_FORMATTED;
661
662	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
663	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
664	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
665
666	return AMD_XGBE_AN_EVENT;
667}
668
/* Process the link partner's base page.  If the partner does not
 * support the current KR/KX mode, switch modes and restart; otherwise
 * continue with next-page exchange or link training.
 */
static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	unsigned int link_support;
	int ret, ad_reg, lp_reg;

	/* Read Base Ability register 2 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* Check for a supported mode, otherwise restart in a different one */
	/* 0x80 = 10GBase-KR ability, 0x20 = 1000Base-KX ability (same bits
	 * the driver advertises in amd_xgbe_an_start())
	 */
	link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
	if (!(ret & link_support))
		return amd_xgbe_an_switch_mode(phydev);

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}
699
700static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
701					       enum amd_xgbe_phy_rx *state)
702{
703	int ad_reg, lp_reg;
704
705	/* Check Extended Next Page support */
706	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
707	if (ad_reg < 0)
708		return AMD_XGBE_AN_ERROR;
709
710	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
711	if (lp_reg < 0)
712		return AMD_XGBE_AN_ERROR;
713
714	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
715	       amd_xgbe_an_tx_xnp(phydev, state) :
716	       amd_xgbe_an_tx_training(phydev, state);
717}
718
/* Program the advertisement registers from phydev->supported and
 * (re)start auto-negotiation.  Returns the next state for the AN state
 * machine.
 */
static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Be sure we aren't looping trying to negotiate */
	if (priv->mode == AMD_XGBE_MODE_KR) {
		if (priv->kr_state != AMD_XGBE_RX_READY)
			return AMD_XGBE_AN_NO_LINK;
		priv->kr_state = AMD_XGBE_RX_BPA;
	} else {
		if (priv->kx_state != AMD_XGBE_RX_READY)
			return AMD_XGBE_AN_NO_LINK;
		priv->kx_state = AMD_XGBE_RX_BPA;
	}

	/* Set up Advertisement register 3 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* 0xc000 = FEC ability/requested bits */
	if (phydev->supported & SUPPORTED_10000baseR_FEC)
		ret |= 0xc000;
	else
		ret &= ~0xc000;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);

	/* Set up Advertisement register 2 next */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* 0x80 = 10GBase-KR ability */
	if (phydev->supported & SUPPORTED_10000baseKR_Full)
		ret |= 0x80;
	else
		ret &= ~0x80;

	/* 0x20 = 1000Base-KX ability (also used for the 2.5G speed set) */
	if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
	    (phydev->supported & SUPPORTED_2500baseX_Full))
		ret |= 0x20;
	else
		ret &= ~0x20;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);

	/* Set up Advertisement register 1 last */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* 0x400 = symmetric pause, 0x800 = asymmetric pause ability */
	if (phydev->supported & SUPPORTED_Pause)
		ret |= 0x400;
	else
		ret &= ~0x400;

	if (phydev->supported & SUPPORTED_Asym_Pause)
		ret |= 0x800;
	else
		ret &= ~0x800;

	/* We don't intend to perform XNP */
	ret &= ~XNP_NP_EXCHANGE;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);

	/* Enable and start auto-negotiation */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	ret |= MDIO_AN_CTRL1_ENABLE;
	ret |= MDIO_AN_CTRL1_RESTART;
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);

	return AMD_XGBE_AN_EVENT;
}
798
799static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
800{
801	enum amd_xgbe_phy_an new_state;
802	int ret;
803
804	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
805	if (ret < 0)
806		return AMD_XGBE_AN_ERROR;
807
808	new_state = AMD_XGBE_AN_EVENT;
809	if (ret & XGBE_AN_PG_RCV)
810		new_state = AMD_XGBE_AN_PAGE_RECEIVED;
811	else if (ret & XGBE_AN_INC_LINK)
812		new_state = AMD_XGBE_AN_INCOMPAT_LINK;
813	else if (ret & XGBE_AN_INT_CMPLT)
814		new_state = AMD_XGBE_AN_COMPLETE;
815
816	if (new_state != AMD_XGBE_AN_EVENT)
817		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
818
819	return new_state;
820}
821
822static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
823{
824	struct amd_xgbe_phy_priv *priv = phydev->priv;
825	enum amd_xgbe_phy_rx *state;
826	int ret;
827
828	state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
829						 : &priv->kx_state;
830
831	switch (*state) {
832	case AMD_XGBE_RX_BPA:
833		ret = amd_xgbe_an_rx_bpa(phydev, state);
834		break;
835
836	case AMD_XGBE_RX_XNP:
837		ret = amd_xgbe_an_rx_xnp(phydev, state);
838		break;
839
840	default:
841		ret = AMD_XGBE_AN_ERROR;
842	}
843
844	return ret;
845}
846
/* The link partner is incompatible with the current KR/KX mode; try
 * the other mode and restart auto-negotiation.
 */
static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
{
	return amd_xgbe_an_switch_mode(phydev);
}
851
/* Auto-negotiation state machine, run from the driver's workqueue.
 * Loops until a terminal state (COMPLETE, NO_LINK, EXIT or ERROR) is
 * reached, then records the result and returns the machine to READY.
 * priv->an_* fields are only touched with an_mutex held; the lock is
 * dropped between iterations so other paths can observe the state.
 */
static void amd_xgbe_an_state_machine(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_work);
	struct phy_device *phydev = priv->phydev;
	enum amd_xgbe_phy_an cur_state;
	int sleep;

	while (1) {
		mutex_lock(&priv->an_mutex);

		/* Remember the starting state for error reporting */
		cur_state = priv->an_state;

		switch (priv->an_state) {
		case AMD_XGBE_AN_START:
			priv->an_state = amd_xgbe_an_start(phydev);
			break;

		case AMD_XGBE_AN_EVENT:
			priv->an_state = amd_xgbe_an_event(phydev);
			break;

		case AMD_XGBE_AN_PAGE_RECEIVED:
			priv->an_state = amd_xgbe_an_page_received(phydev);
			break;

		case AMD_XGBE_AN_INCOMPAT_LINK:
			priv->an_state = amd_xgbe_an_incompat_link(phydev);
			break;

		case AMD_XGBE_AN_COMPLETE:
		case AMD_XGBE_AN_NO_LINK:
		case AMD_XGBE_AN_EXIT:
			goto exit_unlock;

		default:
			priv->an_state = AMD_XGBE_AN_ERROR;
		}

		if (priv->an_state == AMD_XGBE_AN_ERROR) {
			netdev_err(phydev->attached_dev,
				   "error during auto-negotiation, state=%u\n",
				   cur_state);
			goto exit_unlock;
		}

		/* Back off briefly while polling for an interrupt event */
		sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;

		mutex_unlock(&priv->an_mutex);

		if (sleep)
			usleep_range(20, 50);
	}

exit_unlock:
	/* Publish the outcome and mark the machine idle again */
	priv->an_result = priv->an_state;
	priv->an_state = AMD_XGBE_AN_READY;

	mutex_unlock(&priv->an_mutex);
}
913
914static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
915{
916	int count, ret;
917
918	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
919	if (ret < 0)
920		return ret;
921
922	ret |= MDIO_CTRL1_RESET;
923	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
924
925	count = 50;
926	do {
927		msleep(20);
928		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
929		if (ret < 0)
930			return ret;
931	} while ((ret & MDIO_CTRL1_RESET) && --count);
932
933	if (ret & MDIO_CTRL1_RESET)
934		return -ETIMEDOUT;
935
936	return 0;
937}
938
939static int amd_xgbe_phy_config_init(struct phy_device *phydev)
940{
941	struct amd_xgbe_phy_priv *priv = phydev->priv;
942
943	/* Initialize supported features */
944	phydev->supported = SUPPORTED_Autoneg;
945	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
946	phydev->supported |= SUPPORTED_Backplane;
947	phydev->supported |= SUPPORTED_10000baseKR_Full |
948			     SUPPORTED_10000baseR_FEC;
949	switch (priv->speed_set) {
950	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
951		phydev->supported |= SUPPORTED_1000baseKX_Full;
952		break;
953	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
954		phydev->supported |= SUPPORTED_2500baseX_Full;
955		break;
956	}
957	phydev->advertising = phydev->supported;
958
959	/* Turn off and clear interrupts */
960	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
961	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
962
963	return 0;
964}
965
966static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
967{
968	int ret;
969
970	/* Disable auto-negotiation */
971	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
972	if (ret < 0)
973		return ret;
974
975	ret &= ~MDIO_AN_CTRL1_ENABLE;
976	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
977
978	/* Validate/Set specified speed */
979	switch (phydev->speed) {
980	case SPEED_10000:
981		ret = amd_xgbe_phy_xgmii_mode(phydev);
982		break;
983
984	case SPEED_2500:
985		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
986		break;
987
988	case SPEED_1000:
989		ret = amd_xgbe_phy_gmii_mode(phydev);
990		break;
991
992	default:
993		ret = -EINVAL;
994	}
995
996	if (ret < 0)
997		return ret;
998
999	/* Validate duplex mode */
1000	if (phydev->duplex != DUPLEX_FULL)
1001		return -EINVAL;
1002
1003	phydev->pause = 0;
1004	phydev->asym_pause = 0;
1005
1006	return 0;
1007}
1008
/* phy_driver .config_aneg callback: either force the requested speed
 * or (re)start the auto-negotiation state machine on the driver's
 * workqueue.
 */
static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return amd_xgbe_phy_setup_forced(phydev);

	/* Make sure we have the AN MMD present */
	if (!(mmd_mask & MDIO_DEVS_AN))
		return -EINVAL;

	/* Get the current speed mode */
	/* NOTE(review): the value read here is unused beyond error
	 * propagation -- the read effectively just checks that the PCS
	 * responds.
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	/* Start/Restart the auto-negotiation state machine */
	mutex_lock(&priv->an_mutex);
	priv->an_result = AMD_XGBE_AN_READY;
	priv->an_state = AMD_XGBE_AN_START;
	priv->kr_state = AMD_XGBE_RX_READY;
	priv->kx_state = AMD_XGBE_RX_READY;
	mutex_unlock(&priv->an_mutex);

	queue_work(priv->an_workqueue, &priv->an_work);

	return 0;
}
1039
1040static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
1041{
1042	struct amd_xgbe_phy_priv *priv = phydev->priv;
1043	enum amd_xgbe_phy_an state;
1044
1045	mutex_lock(&priv->an_mutex);
1046	state = priv->an_result;
1047	mutex_unlock(&priv->an_mutex);
1048
1049	return (state == AMD_XGBE_AN_COMPLETE);
1050}
1051
/* Refresh phydev->link from the PCS status and restart
 * auto-negotiation when the link transitions from down to up.  While
 * the AN state machine is running, link is reported as up so the phy
 * core does not interfere.
 */
static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_an state;
	unsigned int check_again, autoneg;
	int ret;

	/* If we're doing auto-negotiation don't report link down */
	mutex_lock(&priv->an_mutex);
	state = priv->an_state;
	mutex_unlock(&priv->an_mutex);

	if (state != AMD_XGBE_AN_READY) {
		phydev->link = 1;
		return 0;
	}

	/* Since the device can be in the wrong mode when a link is
	 * (re-)established (cable connected after the interface is
	 * up, etc.), the link status may report no link. If there
	 * is no link, try switching modes and checking the status
	 * again if auto negotiation is enabled.
	 */
	check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
again:
	/* Link status is latched low, so read once to clear
	 * and then read again to get current state
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;

	if (!phydev->link) {
		/* Only one mode switch is attempted per update */
		if (check_again) {
			ret = amd_xgbe_phy_switch_mode(phydev);
			if (ret < 0)
				return ret;
			check_again = 0;
			goto again;
		}
	}

	/* Detect a down-to-up link transition */
	autoneg = (phydev->link && !priv->link) ? 1 : 0;
	priv->link = phydev->link;
	if (autoneg) {
		/* Link is (back) up, re-start auto-negotiation */
		ret = amd_xgbe_phy_config_aneg(phydev);
		if (ret < 0)
			return ret;
	}

	return 0;
}
1111
/* Fill in phydev speed/duplex/pause state from the hardware.
 *
 * Refreshes the link state first.  When auto-negotiation is enabled
 * and complete, speed and pause settings are derived from the
 * intersection of the local advertisement and the link partner
 * ability registers; the PCS mode (KR vs. KX) is switched if it does
 * not match the negotiated speed.  Returns 0 on success or a negative
 * error code.
 */
static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, mode, ad_ret, lp_ret;

	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	/* Read the currently selected PCS type (10GBase-R vs. -X) */
	mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (mode < 0)
		return mode;
	mode &= MDIO_PCS_CTRL2_TYPE;

	if (phydev->autoneg == AUTONEG_ENABLE) {
		/* Auto-negotiation requires the AN MMD to be present */
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		/* Nothing more to report until AN has finished */
		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;

		/* Pause modes are those advertised by BOTH ends
		 * (0x400/0x800 appear to be the clause 73 base page
		 * pause/asym-pause bits -- confirm against the HW spec)
		 */
		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		if (ad_ret & 0x80) {
			/* Both ends support 10G; make sure the PCS is
			 * running in KR (10GBase-R) mode
			 */
			phydev->speed = SPEED_10000;
			if (mode != MDIO_PCS_CTRL2_10GBR) {
				ret = amd_xgbe_phy_xgmii_mode(phydev);
				if (ret < 0)
					return ret;
			}
		} else {
			int (*mode_fcn)(struct phy_device *);

			/* Lower speed depends on which speed set this
			 * device supports (1G/10G vs. 2.5G/10G)
			 */
			if (priv->speed_set ==
			    AMD_XGBE_PHY_SPEEDSET_1000_10000) {
				phydev->speed = SPEED_1000;
				mode_fcn = amd_xgbe_phy_gmii_mode;
			} else {
				phydev->speed = SPEED_2500;
				mode_fcn = amd_xgbe_phy_gmii_2500_mode;
			}

			/* Drop out of KR mode if it is still selected */
			if (mode == MDIO_PCS_CTRL2_10GBR) {
				ret = mode_fcn(phydev);
				if (ret < 0)
					return ret;
			}
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		/* Forced speed: report it from the current PCS mode */
		if (mode == MDIO_PCS_CTRL2_10GBR) {
			phydev->speed = SPEED_10000;
		} else {
			if (priv->speed_set ==
			    AMD_XGBE_PHY_SPEEDSET_1000_10000)
				phydev->speed = SPEED_1000;
			else
				phydev->speed = SPEED_2500;
		}
		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}
1200
1201static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1202{
1203	int ret;
1204
1205	mutex_lock(&phydev->lock);
1206
1207	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1208	if (ret < 0)
1209		goto unlock;
1210
1211	ret |= MDIO_CTRL1_LPOWER;
1212	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1213
1214	ret = 0;
1215
1216unlock:
1217	mutex_unlock(&phydev->lock);
1218
1219	return ret;
1220}
1221
1222static int amd_xgbe_phy_resume(struct phy_device *phydev)
1223{
1224	int ret;
1225
1226	mutex_lock(&phydev->lock);
1227
1228	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1229	if (ret < 0)
1230		goto unlock;
1231
1232	ret &= ~MDIO_CTRL1_LPOWER;
1233	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1234
1235	ret = 0;
1236
1237unlock:
1238	mutex_unlock(&phydev->lock);
1239
1240	return ret;
1241}
1242
/* Per-device initialization when the PHY driver is bound.
 *
 * Locates the platform device via the device-tree node, maps the three
 * serdes MMIO regions (rxtx, sir0, sir1), reads the "speed-set"
 * property to select the supported speed pair, records the current
 * KR/KX mode and creates the auto-negotiation workqueue.  On failure,
 * unwinds in reverse acquisition order via the goto chain.  Returns 0
 * on success or a negative error code.
 */
static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *pdev;
	struct device *dev;
	char *wq_name;
	const __be32 *property;
	unsigned int speed_set;
	int ret;

	/* This driver is device-tree only */
	if (!phydev->dev.of_node)
		return -EINVAL;

	/* Takes a reference on pdev; dropped via of_dev_put() below */
	pdev = of_find_device_by_node(phydev->dev.of_node);
	if (!pdev)
		return -EINVAL;
	dev = &pdev->dev;

	/* Workqueue name is derived from the MDIO bus for uniqueness */
	wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
	if (!wq_name) {
		ret = -ENOMEM;
		goto err_pdev;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_name;
	}

	priv->pdev = pdev;
	priv->dev = dev;
	priv->phydev = phydev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_priv;
	}

	priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the device speed set property (defaults to 0 if absent) */
	speed_set = 0;
	property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
				   NULL);
	if (property)
		speed_set = be32_to_cpu(*property);

	switch (speed_set) {
	case 0:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
		break;
	case 1:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
		break;
	default:
		dev_err(dev, "invalid amd,speed-set property\n");
		ret = -EINVAL;
		goto err_sir1;
	}

	/* Assume link up so update_link() restarts AN on first check */
	priv->link = 1;

	/* Record the initial serdes mode from the PCS type field */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		goto err_sir1;
	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
		priv->mode = AMD_XGBE_MODE_KR;
	else
		priv->mode = AMD_XGBE_MODE_KX;

	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
	priv->an_workqueue = create_singlethread_workqueue(wq_name);
	if (!priv->an_workqueue) {
		ret = -ENOMEM;
		goto err_sir1;
	}

	phydev->priv = priv;

	/* Name buffer and pdev reference are no longer needed */
	kfree(wq_name);
	of_dev_put(pdev);

	return 0;

err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_priv:
	devm_kfree(dev, priv);

err_name:
	kfree(wq_name);

err_pdev:
	of_dev_put(pdev);

	return ret;
}
1373
/* Per-device teardown when the PHY driver is unbound.
 *
 * Tells the AN state machine to exit, waits for any queued work to
 * finish, then explicitly releases the devm-managed MMIO regions and
 * private data (the phy_device, not the platform device, is being
 * released, so devm cleanup would not run for them here).
 */
static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	/* Stop any in process auto-negotiation */
	mutex_lock(&priv->an_mutex);
	priv->an_state = AMD_XGBE_AN_EXIT;
	mutex_unlock(&priv->an_mutex);

	/* Wait for the worker to observe AN_EXIT before destroying */
	flush_workqueue(priv->an_workqueue);
	destroy_workqueue(priv->an_workqueue);

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}
1402
1403static int amd_xgbe_match_phy_device(struct phy_device *phydev)
1404{
1405	return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
1406}
1407
/* Driver table registered with the phylib core.  Device binding uses
 * the custom match_phy_device callback (exact PCS ID match) rather
 * than the phy_id/phy_id_mask pair alone.
 */
static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};
1429
1430static int __init amd_xgbe_phy_init(void)
1431{
1432	return phy_drivers_register(amd_xgbe_phy_driver,
1433				    ARRAY_SIZE(amd_xgbe_phy_driver));
1434}
1435
1436static void __exit amd_xgbe_phy_exit(void)
1437{
1438	phy_drivers_unregister(amd_xgbe_phy_driver,
1439			       ARRAY_SIZE(amd_xgbe_phy_driver));
1440}
1441
/* Module entry/exit points */
module_init(amd_xgbe_phy_init);
module_exit(amd_xgbe_phy_exit);
1444
/* MDIO device ID table used for module autoloading (udev/modalias) */
static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }
};
MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
1450