lpc_eth.c revision cdaf0b835df04177397b90214f8b457fd23b67e0
1/*
2 * drivers/net/ethernet/nxp/lpc_eth.c
3 *
4 * Author: Kevin Wells <kevin.wells@nxp.com>
5 *
6 * Copyright (C) 2010 NXP Semiconductors
7 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/sched.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/errno.h>
30#include <linux/ioport.h>
31#include <linux/crc32.h>
32#include <linux/platform_device.h>
33#include <linux/spinlock.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/clk.h>
37#include <linux/workqueue.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/skbuff.h>
41#include <linux/phy.h>
42#include <linux/dma-mapping.h>
43#include <linux/of_net.h>
44#include <linux/types.h>
45
46#include <linux/delay.h>
47#include <linux/io.h>
48#include <mach/board.h>
49#include <mach/platform.h>
50#include <mach/hardware.h>
51
52#define MODNAME "lpc-eth"
53#define DRV_VERSION "1.00"
54#define PHYDEF_ADDR 0x00
55
56#define ENET_MAXF_SIZE 1536
57#define ENET_RX_DESC 48
58#define ENET_TX_DESC 16
59
60#define NAPI_WEIGHT 16
61
62/*
63 * Ethernet MAC controller Register offsets
64 */
65#define LPC_ENET_MAC1(x)			(x + 0x000)
66#define LPC_ENET_MAC2(x)			(x + 0x004)
67#define LPC_ENET_IPGT(x)			(x + 0x008)
68#define LPC_ENET_IPGR(x)			(x + 0x00C)
69#define LPC_ENET_CLRT(x)			(x + 0x010)
70#define LPC_ENET_MAXF(x)			(x + 0x014)
71#define LPC_ENET_SUPP(x)			(x + 0x018)
72#define LPC_ENET_TEST(x)			(x + 0x01C)
73#define LPC_ENET_MCFG(x)			(x + 0x020)
74#define LPC_ENET_MCMD(x)			(x + 0x024)
75#define LPC_ENET_MADR(x)			(x + 0x028)
76#define LPC_ENET_MWTD(x)			(x + 0x02C)
77#define LPC_ENET_MRDD(x)			(x + 0x030)
78#define LPC_ENET_MIND(x)			(x + 0x034)
79#define LPC_ENET_SA0(x)				(x + 0x040)
80#define LPC_ENET_SA1(x)				(x + 0x044)
81#define LPC_ENET_SA2(x)				(x + 0x048)
82#define LPC_ENET_COMMAND(x)			(x + 0x100)
83#define LPC_ENET_STATUS(x)			(x + 0x104)
84#define LPC_ENET_RXDESCRIPTOR(x)		(x + 0x108)
85#define LPC_ENET_RXSTATUS(x)			(x + 0x10C)
86#define LPC_ENET_RXDESCRIPTORNUMBER(x)		(x + 0x110)
87#define LPC_ENET_RXPRODUCEINDEX(x)		(x + 0x114)
88#define LPC_ENET_RXCONSUMEINDEX(x)		(x + 0x118)
89#define LPC_ENET_TXDESCRIPTOR(x)		(x + 0x11C)
90#define LPC_ENET_TXSTATUS(x)			(x + 0x120)
91#define LPC_ENET_TXDESCRIPTORNUMBER(x)		(x + 0x124)
92#define LPC_ENET_TXPRODUCEINDEX(x)		(x + 0x128)
93#define LPC_ENET_TXCONSUMEINDEX(x)		(x + 0x12C)
94#define LPC_ENET_TSV0(x)			(x + 0x158)
95#define LPC_ENET_TSV1(x)			(x + 0x15C)
96#define LPC_ENET_RSV(x)				(x + 0x160)
97#define LPC_ENET_FLOWCONTROLCOUNTER(x)		(x + 0x170)
98#define LPC_ENET_FLOWCONTROLSTATUS(x)		(x + 0x174)
99#define LPC_ENET_RXFILTER_CTRL(x)		(x + 0x200)
100#define LPC_ENET_RXFILTERWOLSTATUS(x)		(x + 0x204)
101#define LPC_ENET_RXFILTERWOLCLEAR(x)		(x + 0x208)
102#define LPC_ENET_HASHFILTERL(x)			(x + 0x210)
103#define LPC_ENET_HASHFILTERH(x)			(x + 0x214)
104#define LPC_ENET_INTSTATUS(x)			(x + 0xFE0)
105#define LPC_ENET_INTENABLE(x)			(x + 0xFE4)
106#define LPC_ENET_INTCLEAR(x)			(x + 0xFE8)
107#define LPC_ENET_INTSET(x)			(x + 0xFEC)
108#define LPC_ENET_POWERDOWN(x)			(x + 0xFF4)
109
110/*
111 * mac1 register definitions
112 */
113#define LPC_MAC1_RECV_ENABLE			(1 << 0)
114#define LPC_MAC1_PASS_ALL_RX_FRAMES		(1 << 1)
115#define LPC_MAC1_RX_FLOW_CONTROL		(1 << 2)
116#define LPC_MAC1_TX_FLOW_CONTROL		(1 << 3)
117#define LPC_MAC1_LOOPBACK			(1 << 4)
118#define LPC_MAC1_RESET_TX			(1 << 8)
119#define LPC_MAC1_RESET_MCS_TX			(1 << 9)
120#define LPC_MAC1_RESET_RX			(1 << 10)
121#define LPC_MAC1_RESET_MCS_RX			(1 << 11)
122#define LPC_MAC1_SIMULATION_RESET		(1 << 14)
123#define LPC_MAC1_SOFT_RESET			(1 << 15)
124
125/*
126 * mac2 register definitions
127 */
128#define LPC_MAC2_FULL_DUPLEX			(1 << 0)
129#define LPC_MAC2_FRAME_LENGTH_CHECKING		(1 << 1)
130#define LPC_MAC2_HUGH_LENGTH_CHECKING		(1 << 2)
131#define LPC_MAC2_DELAYED_CRC			(1 << 3)
132#define LPC_MAC2_CRC_ENABLE			(1 << 4)
133#define LPC_MAC2_PAD_CRC_ENABLE			(1 << 5)
134#define LPC_MAC2_VLAN_PAD_ENABLE		(1 << 6)
135#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE		(1 << 7)
136#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT	(1 << 8)
137#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT	(1 << 9)
138#define LPC_MAC2_NO_BACKOFF			(1 << 12)
139#define LPC_MAC2_BACK_PRESSURE			(1 << 13)
140#define LPC_MAC2_EXCESS_DEFER			(1 << 14)
141
142/*
143 * ipgt register definitions
144 */
145#define LPC_IPGT_LOAD(n)			((n) & 0x7F)
146
147/*
148 * ipgr register definitions
149 */
150#define LPC_IPGR_LOAD_PART2(n)			((n) & 0x7F)
151#define LPC_IPGR_LOAD_PART1(n)			(((n) & 0x7F) << 8)
152
153/*
154 * clrt register definitions
155 */
156#define LPC_CLRT_LOAD_RETRY_MAX(n)		((n) & 0xF)
157#define LPC_CLRT_LOAD_COLLISION_WINDOW(n)	(((n) & 0x3F) << 8)
158
159/*
160 * maxf register definitions
161 */
162#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n)		((n) & 0xFFFF)
163
164/*
165 * supp register definitions
166 */
167#define LPC_SUPP_SPEED				(1 << 8)
168#define LPC_SUPP_RESET_RMII			(1 << 11)
169
170/*
171 * test register definitions
172 */
173#define LPC_TEST_SHORTCUT_PAUSE_QUANTA		(1 << 0)
174#define LPC_TEST_PAUSE				(1 << 1)
175#define LPC_TEST_BACKPRESSURE			(1 << 2)
176
177/*
178 * mcfg register definitions
179 */
180#define LPC_MCFG_SCAN_INCREMENT			(1 << 0)
181#define LPC_MCFG_SUPPRESS_PREAMBLE		(1 << 1)
182#define LPC_MCFG_CLOCK_SELECT(n)		(((n) & 0x7) << 2)
183#define LPC_MCFG_CLOCK_HOST_DIV_4		0
184#define LPC_MCFG_CLOCK_HOST_DIV_6		2
185#define LPC_MCFG_CLOCK_HOST_DIV_8		3
186#define LPC_MCFG_CLOCK_HOST_DIV_10		4
187#define LPC_MCFG_CLOCK_HOST_DIV_14		5
188#define LPC_MCFG_CLOCK_HOST_DIV_20		6
189#define LPC_MCFG_CLOCK_HOST_DIV_28		7
190#define LPC_MCFG_RESET_MII_MGMT			(1 << 15)
191
192/*
193 * mcmd register definitions
194 */
195#define LPC_MCMD_READ				(1 << 0)
196#define LPC_MCMD_SCAN				(1 << 1)
197
198/*
199 * madr register definitions
200 */
201#define LPC_MADR_REGISTER_ADDRESS(n)		((n) & 0x1F)
202#define LPC_MADR_PHY_0ADDRESS(n)		(((n) & 0x1F) << 8)
203
204/*
205 * mwtd register definitions
206 */
207#define LPC_MWDT_WRITE(n)			((n) & 0xFFFF)
208
209/*
210 * mrdd register definitions
211 */
212#define LPC_MRDD_READ_MASK			0xFFFF
213
214/*
215 * mind register definitions
216 */
217#define LPC_MIND_BUSY				(1 << 0)
218#define LPC_MIND_SCANNING			(1 << 1)
219#define LPC_MIND_NOT_VALID			(1 << 2)
220#define LPC_MIND_MII_LINK_FAIL			(1 << 3)
221
222/*
223 * command register definitions
224 */
225#define LPC_COMMAND_RXENABLE			(1 << 0)
226#define LPC_COMMAND_TXENABLE			(1 << 1)
227#define LPC_COMMAND_REG_RESET			(1 << 3)
228#define LPC_COMMAND_TXRESET			(1 << 4)
229#define LPC_COMMAND_RXRESET			(1 << 5)
230#define LPC_COMMAND_PASSRUNTFRAME		(1 << 6)
231#define LPC_COMMAND_PASSRXFILTER		(1 << 7)
232#define LPC_COMMAND_TXFLOWCONTROL		(1 << 8)
233#define LPC_COMMAND_RMII			(1 << 9)
234#define LPC_COMMAND_FULLDUPLEX			(1 << 10)
235
236/*
237 * status register definitions
238 */
239#define LPC_STATUS_RXACTIVE			(1 << 0)
240#define LPC_STATUS_TXACTIVE			(1 << 1)
241
242/*
243 * tsv0 register definitions
244 */
245#define LPC_TSV0_CRC_ERROR			(1 << 0)
246#define LPC_TSV0_LENGTH_CHECK_ERROR		(1 << 1)
247#define LPC_TSV0_LENGTH_OUT_OF_RANGE		(1 << 2)
248#define LPC_TSV0_DONE				(1 << 3)
249#define LPC_TSV0_MULTICAST			(1 << 4)
250#define LPC_TSV0_BROADCAST			(1 << 5)
251#define LPC_TSV0_PACKET_DEFER			(1 << 6)
252#define LPC_TSV0_ESCESSIVE_DEFER		(1 << 7)
253#define LPC_TSV0_ESCESSIVE_COLLISION		(1 << 8)
254#define LPC_TSV0_LATE_COLLISION			(1 << 9)
255#define LPC_TSV0_GIANT				(1 << 10)
256#define LPC_TSV0_UNDERRUN			(1 << 11)
257#define LPC_TSV0_TOTAL_BYTES(n)			(((n) >> 12) & 0xFFFF)
258#define LPC_TSV0_CONTROL_FRAME			(1 << 28)
259#define LPC_TSV0_PAUSE				(1 << 29)
260#define LPC_TSV0_BACKPRESSURE			(1 << 30)
261#define LPC_TSV0_VLAN				(1 << 31)
262
263/*
264 * tsv1 register definitions
265 */
266#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n)		((n) & 0xFFFF)
267#define LPC_TSV1_COLLISION_COUNT(n)		(((n) >> 16) & 0xF)
268
269/*
270 * rsv register definitions
271 */
272#define LPC_RSV_RECEIVED_BYTE_COUNT(n)		((n) & 0xFFFF)
273#define LPC_RSV_RXDV_EVENT_IGNORED		(1 << 16)
274#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN	(1 << 17)
275#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN	(1 << 18)
276#define LPC_RSV_RECEIVE_CODE_VIOLATION		(1 << 19)
277#define LPC_RSV_CRC_ERROR			(1 << 20)
278#define LPC_RSV_LENGTH_CHECK_ERROR		(1 << 21)
279#define LPC_RSV_LENGTH_OUT_OF_RANGE		(1 << 22)
280#define LPC_RSV_RECEIVE_OK			(1 << 23)
281#define LPC_RSV_MULTICAST			(1 << 24)
282#define LPC_RSV_BROADCAST			(1 << 25)
283#define LPC_RSV_DRIBBLE_NIBBLE			(1 << 26)
284#define LPC_RSV_CONTROL_FRAME			(1 << 27)
285#define LPC_RSV_PAUSE				(1 << 28)
286#define LPC_RSV_UNSUPPORTED_OPCODE		(1 << 29)
287#define LPC_RSV_VLAN				(1 << 30)
288
289/*
290 * flowcontrolcounter register definitions
291 */
292#define LPC_FCCR_MIRRORCOUNTER(n)		((n) & 0xFFFF)
293#define LPC_FCCR_PAUSETIMER(n)			(((n) >> 16) & 0xFFFF)
294
295/*
296 * flowcontrolstatus register definitions
297 */
298#define LPC_FCCR_MIRRORCOUNTERCURRENT(n)	((n) & 0xFFFF)
299
300/*
301 * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
302 * register definitions
303 */
304#define LPC_RXFLTRW_ACCEPTUNICAST		(1 << 0)
305#define LPC_RXFLTRW_ACCEPTUBROADCAST		(1 << 1)
306#define LPC_RXFLTRW_ACCEPTUMULTICAST		(1 << 2)
307#define LPC_RXFLTRW_ACCEPTUNICASTHASH		(1 << 3)
308#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH	(1 << 4)
309#define LPC_RXFLTRW_ACCEPTPERFECT		(1 << 5)
310
311/*
312 * rxfliterctrl register definitions
313 */
314#define LPC_RXFLTRWSTS_MAGICPACKETENWOL		(1 << 12)
315#define LPC_RXFLTRWSTS_RXFILTERENWOL		(1 << 13)
316
317/*
318 * rxfilterwolstatus/rxfilterwolclear register definitions
319 */
320#define LPC_RXFLTRWSTS_RXFILTERWOL		(1 << 7)
321#define LPC_RXFLTRWSTS_MAGICPACKETWOL		(1 << 8)
322
323/*
324 * intstatus, intenable, intclear, and Intset shared register
325 * definitions
326 */
327#define LPC_MACINT_RXOVERRUNINTEN		(1 << 0)
328#define LPC_MACINT_RXERRORONINT			(1 << 1)
329#define LPC_MACINT_RXFINISHEDINTEN		(1 << 2)
330#define LPC_MACINT_RXDONEINTEN			(1 << 3)
331#define LPC_MACINT_TXUNDERRUNINTEN		(1 << 4)
332#define LPC_MACINT_TXERRORINTEN			(1 << 5)
333#define LPC_MACINT_TXFINISHEDINTEN		(1 << 6)
334#define LPC_MACINT_TXDONEINTEN			(1 << 7)
335#define LPC_MACINT_SOFTINTEN			(1 << 12)
336#define LPC_MACINT_WAKEUPINTEN			(1 << 13)
337
338/*
339 * powerdown register definitions
340 */
341#define LPC_POWERDOWN_MACAHB			(1 << 31)
342
343/* Upon the upcoming introduction of device tree usage in LPC32xx,
344 * lpc_phy_interface_mode() and use_iram_for_net() will be extended with a
345 * device parameter for access to device tree information at runtime, instead
346 * of defining the values at compile time
347 */
/* Report the PHY interface mode selected at build time: MII when
 * CONFIG_ARCH_LPC32XX_MII_SUPPORT is set, RMII otherwise.
 */
static inline phy_interface_t lpc_phy_interface_mode(void)
{
#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
	return PHY_INTERFACE_MODE_MII;
#else
	return PHY_INTERFACE_MODE_RMII;
#endif
}
356
/* Nonzero when the on-chip IRAM is used for the ethernet DMA buffers
 * (CONFIG_ARCH_LPC32XX_IRAM_FOR_NET), zero otherwise.
 */
static inline int use_iram_for_net(void)
{
#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
	return 1;
#else
	return 0;
#endif
}
365
366/* Receive Status information word */
367#define RXSTATUS_SIZE			0x000007FF
368#define RXSTATUS_CONTROL		(1 << 18)
369#define RXSTATUS_VLAN			(1 << 19)
370#define RXSTATUS_FILTER			(1 << 20)
371#define RXSTATUS_MULTICAST		(1 << 21)
372#define RXSTATUS_BROADCAST		(1 << 22)
373#define RXSTATUS_CRC			(1 << 23)
374#define RXSTATUS_SYMBOL			(1 << 24)
375#define RXSTATUS_LENGTH			(1 << 25)
376#define RXSTATUS_RANGE			(1 << 26)
377#define RXSTATUS_ALIGN			(1 << 27)
378#define RXSTATUS_OVERRUN		(1 << 28)
379#define RXSTATUS_NODESC			(1 << 29)
380#define RXSTATUS_LAST			(1 << 30)
381#define RXSTATUS_ERROR			(1 << 31)
382
383#define RXSTATUS_STATUS_ERROR \
384	(RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
385	 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
386
387/* Receive Descriptor control word */
388#define RXDESC_CONTROL_SIZE		0x000007FF
389#define RXDESC_CONTROL_INT		(1 << 31)
390
391/* Transmit Status information word */
392#define TXSTATUS_COLLISIONS_GET(x)	(((x) >> 21) & 0xF)
393#define TXSTATUS_DEFER			(1 << 25)
394#define TXSTATUS_EXCESSDEFER		(1 << 26)
395#define TXSTATUS_EXCESSCOLL		(1 << 27)
396#define TXSTATUS_LATECOLL		(1 << 28)
397#define TXSTATUS_UNDERRUN		(1 << 29)
398#define TXSTATUS_NODESC			(1 << 30)
399#define TXSTATUS_ERROR			(1 << 31)
400
401/* Transmit Descriptor control word */
402#define TXDESC_CONTROL_SIZE		0x000007FF
403#define TXDESC_CONTROL_OVERRIDE		(1 << 26)
404#define TXDESC_CONTROL_HUGE		(1 << 27)
405#define TXDESC_CONTROL_PAD		(1 << 28)
406#define TXDESC_CONTROL_CRC		(1 << 29)
407#define TXDESC_CONTROL_LAST		(1 << 30)
408#define TXDESC_CONTROL_INT		(1 << 31)
409
410static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
411				   struct net_device *ndev);
412
413/*
414 * Structure of a TX/RX descriptors and RX status
415 */
struct txrx_desc_t {
	__le32 packet;		/* Physical address of the packet buffer */
	__le32 control;		/* Control word: buffer size plus flag bits */
};
struct rx_status_t {
	__le32 statusinfo;	/* RXSTATUS_* flags plus received frame size */
	__le32 statushashcrc;	/* Hash CRC status word; driver only clears it */
};
424
425/*
426 * Device driver data structure
427 */
struct netdata_local {
	struct platform_device	*pdev;		/* Owning platform device */
	struct net_device	*ndev;		/* Associated network device */
	spinlock_t		lock;		/* Guards register access/state */
	void __iomem		*net_base;	/* Mapped MAC register base */
	u32			msg_enable;	/* netif_msg_* level mask */
	struct sk_buff		*skb[ENET_TX_DESC]; /* skb queued per TX slot */
	unsigned int		last_tx_idx;	/* Next TX slot to reclaim */
	unsigned int		num_used_tx_buffs; /* In-flight TX descriptors */
	struct mii_bus		*mii_bus;	/* MDIO bus for the PHY */
	struct phy_device	*phy_dev;	/* Attached PHY device */
	struct clk		*clk;		/* MAC peripheral clock */
	dma_addr_t		dma_buff_base_p; /* DMA region, bus address */
	void			*dma_buff_base_v; /* DMA region, virtual address */
	size_t			dma_buff_size;	/* Size of the DMA region */
	struct txrx_desc_t	*tx_desc_v;	/* TX descriptor array (virtual) */
	u32			*tx_stat_v;	/* TX status array (virtual) */
	void			*tx_buff_v;	/* TX packet buffers (virtual) */
	struct txrx_desc_t	*rx_desc_v;	/* RX descriptor array (virtual) */
	struct rx_status_t	*rx_stat_v;	/* RX status array (virtual) */
	void			*rx_buff_v;	/* RX packet buffers (virtual) */
	int			link;		/* Last seen PHY link state */
	int			speed;		/* Last seen PHY speed */
	int			duplex;		/* Last seen PHY duplex (-1 = unknown) */
	struct napi_struct	napi;		/* NAPI polling context */
};
454
455/*
456 * MAC support functions
457 */
458static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
459{
460	u32 tmp;
461
462	/* Set station address */
463	tmp = mac[0] | ((u32)mac[1] << 8);
464	writel(tmp, LPC_ENET_SA2(pldat->net_base));
465	tmp = mac[2] | ((u32)mac[3] << 8);
466	writel(tmp, LPC_ENET_SA1(pldat->net_base));
467	tmp = mac[4] | ((u32)mac[5] << 8);
468	writel(tmp, LPC_ENET_SA0(pldat->net_base));
469
470	netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
471}
472
473static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
474{
475	u32 tmp;
476
477	/* Get station address */
478	tmp = readl(LPC_ENET_SA2(pldat->net_base));
479	mac[0] = tmp & 0xFF;
480	mac[1] = tmp >> 8;
481	tmp = readl(LPC_ENET_SA1(pldat->net_base));
482	mac[2] = tmp & 0xFF;
483	mac[3] = tmp >> 8;
484	tmp = readl(LPC_ENET_SA0(pldat->net_base));
485	mac[4] = tmp & 0xFF;
486	mac[5] = tmp >> 8;
487}
488
489static void __lpc_eth_clock_enable(struct netdata_local *pldat,
490				   bool enable)
491{
492	if (enable)
493		clk_enable(pldat->clk);
494	else
495		clk_disable(pldat->clk);
496}
497
498static void __lpc_params_setup(struct netdata_local *pldat)
499{
500	u32 tmp;
501
502	if (pldat->duplex == DUPLEX_FULL) {
503		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
504		tmp |= LPC_MAC2_FULL_DUPLEX;
505		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
506		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
507		tmp |= LPC_COMMAND_FULLDUPLEX;
508		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
509		writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
510	} else {
511		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
512		tmp &= ~LPC_MAC2_FULL_DUPLEX;
513		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
514		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
515		tmp &= ~LPC_COMMAND_FULLDUPLEX;
516		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
517		writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
518	}
519
520	if (pldat->speed == SPEED_100)
521		writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
522	else
523		writel(0, LPC_ENET_SUPP(pldat->net_base));
524}
525
/* Put the MAC logic (MAC1) and the TX/RX datapaths and host registers
 * (COMMAND) into reset.
 */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	/* Reset the TX/RX datapaths and the host registers */
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
535
/* Reset the MII management block and select the slowest MDC clock.
 * Always returns 0; the int return matches the mii_bus reset hook.
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
547
548static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
549{
550	phys_addr_t phaddr;
551
552	phaddr = addr - pldat->dma_buff_base_v;
553	phaddr += pldat->dma_buff_base_p;
554
555	return phaddr;
556}
557
/* Enable the RX-done and TX-done MAC interrupts (the only ones used) */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
563
/* Mask all MAC interrupts */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
568
569/* Setup TX/RX descriptors */
/* Setup TX/RX descriptors
 *
 * Carves the single pre-allocated DMA region (dma_buff_base_v) into, in
 * order: TX descriptors, TX status words, TX packet buffers, RX
 * descriptors, RX status words, and RX packet buffers, each group
 * 16-byte aligned where the hardware needs it.  Then initializes every
 * descriptor to point at its packet buffer and programs the base
 * addresses and ring sizes into the controller.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		/* Each TX slot owns a fixed ENET_MAXF_SIZE buffer */
		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		/* Buffer size field is (size - 1); interrupt on receive */
		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
642
643static void __lpc_eth_init(struct netdata_local *pldat)
644{
645	u32 tmp;
646
647	/* Disable controller and reset */
648	tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
649	tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
650	writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
651	tmp = readl(LPC_ENET_MAC1(pldat->net_base));
652	tmp &= ~LPC_MAC1_RECV_ENABLE;
653	writel(tmp, LPC_ENET_MAC1(pldat->net_base));
654
655	/* Initial MAC setup */
656	writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
657	writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
658	       LPC_ENET_MAC2(pldat->net_base));
659	writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
660
661	/* Collision window, gap */
662	writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
663		LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
664	       LPC_ENET_CLRT(pldat->net_base));
665	writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
666
667	if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
668		writel(LPC_COMMAND_PASSRUNTFRAME,
669		       LPC_ENET_COMMAND(pldat->net_base));
670	else {
671		writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
672		       LPC_ENET_COMMAND(pldat->net_base));
673		writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
674	}
675
676	__lpc_params_setup(pldat);
677
678	/* Setup TX and RX descriptors */
679	__lpc_txrx_desc_setup(pldat);
680
681	/* Setup packet filtering */
682	writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
683	       LPC_ENET_RXFILTER_CTRL(pldat->net_base));
684
685	/* Get the next TX buffer output index */
686	pldat->num_used_tx_buffs = 0;
687	pldat->last_tx_idx =
688		readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
689
690	/* Clear and enable interrupts */
691	writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
692	smp_wmb();
693	lpc_eth_enable_int(pldat->net_base);
694
695	/* Enable controller */
696	tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
697	tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
698	writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
699	tmp = readl(LPC_ENET_MAC1(pldat->net_base));
700	tmp |= LPC_MAC1_RECV_ENABLE;
701	writel(tmp, LPC_ENET_MAC1(pldat->net_base));
702}
703
/* Stop the controller: reset the MAC logic and clear MAC1/MAC2 so the
 * receiver and transmitter stay disabled.
 */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
711
712/*
713 * MAC<--->PHY support functions
714 */
715static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
716{
717	struct netdata_local *pldat = bus->priv;
718	unsigned long timeout = jiffies + msecs_to_jiffies(100);
719	int lps;
720
721	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
722	writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
723
724	/* Wait for unbusy status */
725	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
726		if (time_after(jiffies, timeout))
727			return -EIO;
728		cpu_relax();
729	}
730
731	lps = readl(LPC_ENET_MRDD(pldat->net_base));
732	writel(0, LPC_ENET_MCMD(pldat->net_base));
733
734	return lps;
735}
736
737static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
738			u16 phydata)
739{
740	struct netdata_local *pldat = bus->priv;
741	unsigned long timeout = jiffies + msecs_to_jiffies(100);
742
743	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
744	writel(phydata, LPC_ENET_MWTD(pldat->net_base));
745
746	/* Wait for completion */
747	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
748		if (time_after(jiffies, timeout))
749			return -EIO;
750		cpu_relax();
751	}
752
753	return 0;
754}
755
756static int lpc_mdio_reset(struct mii_bus *bus)
757{
758	return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
759}
760
761static void lpc_handle_link_change(struct net_device *ndev)
762{
763	struct netdata_local *pldat = netdev_priv(ndev);
764	struct phy_device *phydev = pldat->phy_dev;
765	unsigned long flags;
766
767	bool status_change = false;
768
769	spin_lock_irqsave(&pldat->lock, flags);
770
771	if (phydev->link) {
772		if ((pldat->speed != phydev->speed) ||
773		    (pldat->duplex != phydev->duplex)) {
774			pldat->speed = phydev->speed;
775			pldat->duplex = phydev->duplex;
776			status_change = true;
777		}
778	}
779
780	if (phydev->link != pldat->link) {
781		if (!phydev->link) {
782			pldat->speed = 0;
783			pldat->duplex = -1;
784		}
785		pldat->link = phydev->link;
786
787		status_change = true;
788	}
789
790	spin_unlock_irqrestore(&pldat->lock, flags);
791
792	if (status_change)
793		__lpc_params_setup(pldat);
794}
795
/* Find the first PHY on the MDIO bus and attach it to the netdevice.
 * On success caches the phy_device in pldat and resets the cached
 * link/speed/duplex state.  Returns 0, -ENODEV if no PHY is present, or
 * the phy_connect() error.
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, dev_name(&phydev->dev),
		&lpc_handle_link_change, 0, lpc_phy_interface_mode());

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features (10/100 only) */
	phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	/* Start with link down; lpc_handle_link_change() updates these */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;
	pldat->phy_dev = phydev;

	netdev_info(ndev,
		"attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
	return 0;
}
834
835static int lpc_mii_init(struct netdata_local *pldat)
836{
837	int err = -ENXIO, i;
838
839	pldat->mii_bus = mdiobus_alloc();
840	if (!pldat->mii_bus) {
841		err = -ENOMEM;
842		goto err_out;
843	}
844
845	/* Setup MII mode */
846	if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
847		writel(LPC_COMMAND_PASSRUNTFRAME,
848		       LPC_ENET_COMMAND(pldat->net_base));
849	else {
850		writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
851		       LPC_ENET_COMMAND(pldat->net_base));
852		writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
853	}
854
855	pldat->mii_bus->name = "lpc_mii_bus";
856	pldat->mii_bus->read = &lpc_mdio_read;
857	pldat->mii_bus->write = &lpc_mdio_write;
858	pldat->mii_bus->reset = &lpc_mdio_reset;
859	snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
860		 pldat->pdev->name, pldat->pdev->id);
861	pldat->mii_bus->priv = pldat;
862	pldat->mii_bus->parent = &pldat->pdev->dev;
863
864	pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
865	if (!pldat->mii_bus->irq) {
866		err = -ENOMEM;
867		goto err_out_1;
868	}
869
870	for (i = 0; i < PHY_MAX_ADDR; i++)
871		pldat->mii_bus->irq[i] = PHY_POLL;
872
873	platform_set_drvdata(pldat->pdev, pldat->mii_bus);
874
875	if (mdiobus_register(pldat->mii_bus))
876		goto err_out_free_mdio_irq;
877
878	if (lpc_mii_probe(pldat->ndev) != 0)
879		goto err_out_unregister_bus;
880
881	return 0;
882
883err_out_unregister_bus:
884	mdiobus_unregister(pldat->mii_bus);
885err_out_free_mdio_irq:
886	kfree(pldat->mii_bus->irq);
887err_out_1:
888	mdiobus_free(pldat->mii_bus);
889err_out:
890	return err;
891}
892
/* Reclaim completed TX descriptors: walk the ring from last_tx_idx up
 * to the hardware's consume index, account statistics, free the skbs of
 * successfully sent frames, and wake the queue if it was stopped.
 * Caller must hold the TX queue lock (see lpc_eth_poll()).
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		skb = pldat->skb[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;

			/* Free buffer */
			dev_kfree_skb_irq(skb);
		}

		/* Re-read: hardware may have completed more while we ran */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	/* Descriptors were freed, so the queue may be restarted */
	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}
950
/* NAPI RX worker: drain up to @budget frames from the RX ring.  Good
 * frames are copied out of the DMA buffer into a freshly allocated skb
 * and handed to the stack; errored frames only update statistics.
 * Returns the number of ring slots processed.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		/* Size field holds (length - 1) */
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error?  A pure range error alone is not treated
		 * as a frame error, so clear the error bit in that case.
		 */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len + 8);
			if (!skb)
				ndev->stats.rx_dropped++;
			else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
					rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index, returning the slot to hardware */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
1022
/* NAPI poll: reclaim finished TX buffers (under the TX queue lock) and
 * receive up to @budget frames.  When the budget is not exhausted,
 * completes NAPI and re-enables the MAC interrupts.
 */
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	/* TX reclaim races with the xmit path, so take the queue lock */
	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete(napi);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
1043
1044static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
1045{
1046	struct net_device *ndev = dev_id;
1047	struct netdata_local *pldat = netdev_priv(ndev);
1048	u32 tmp;
1049
1050	spin_lock(&pldat->lock);
1051
1052	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
1053	/* Clear interrupts */
1054	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));
1055
1056	lpc_eth_disable_int(pldat->net_base);
1057	if (likely(napi_schedule_prep(&pldat->napi)))
1058		__napi_schedule(&pldat->napi);
1059
1060	spin_unlock(&pldat->lock);
1061
1062	return IRQ_HANDLED;
1063}
1064
/*
 * ndo_stop handler: quiesce NAPI and the TX queue, stop the PHY state
 * machine, reset the controller under the lock and finally gate the
 * controller clock to save power.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	/* PHY may be absent (see ioctl/open); only stop it if present */
	if (pldat->phy_dev)
		phy_stop(pldat->phy_dev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	/* Clear the MAC configuration registers after the reset */
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	/* Disable the interface clock */
	__lpc_eth_clock_enable(pldat, false);

	return 0;
}
1090
/*
 * ndo_start_xmit handler: copy the frame into the next free slot of
 * the DMA TX buffer ring and kick the controller by advancing the
 * hardware producer index.  The skb is kept in pldat->skb[] until the
 * completion path (__lpc_handle_xmit, run from NAPI) reclaims it.
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer: the hardware size field is
	   length - 1; interrupt when this (single, last) fragment is sent */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skb[txidx] = skb;
	pldat->num_used_tx_buffs++;

	/* Start transmit: advance (and wrap) the producer index */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	return NETDEV_TX_OK;
}
1142
1143static int lpc_set_mac_address(struct net_device *ndev, void *p)
1144{
1145	struct sockaddr *addr = p;
1146	struct netdata_local *pldat = netdev_priv(ndev);
1147	unsigned long flags;
1148
1149	if (!is_valid_ether_addr(addr->sa_data))
1150		return -EADDRNOTAVAIL;
1151	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1152
1153	spin_lock_irqsave(&pldat->lock, flags);
1154
1155	/* Set station address */
1156	__lpc_set_mac(pldat, ndev->dev_addr);
1157
1158	spin_unlock_irqrestore(&pldat->lock, flags);
1159
1160	return 0;
1161}
1162
1163static void lpc_eth_set_multicast_list(struct net_device *ndev)
1164{
1165	struct netdata_local *pldat = netdev_priv(ndev);
1166	struct netdev_hw_addr_list *mcptr = &ndev->mc;
1167	struct netdev_hw_addr *ha;
1168	u32 tmp32, hash_val, hashlo, hashhi;
1169	unsigned long flags;
1170
1171	spin_lock_irqsave(&pldat->lock, flags);
1172
1173	/* Set station address */
1174	__lpc_set_mac(pldat, ndev->dev_addr);
1175
1176	tmp32 =  LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
1177
1178	if (ndev->flags & IFF_PROMISC)
1179		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
1180			LPC_RXFLTRW_ACCEPTUMULTICAST;
1181	if (ndev->flags & IFF_ALLMULTI)
1182		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
1183
1184	if (netdev_hw_addr_list_count(mcptr))
1185		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
1186
1187	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
1188
1189
1190	/* Set initial hash table */
1191	hashlo = 0x0;
1192	hashhi = 0x0;
1193
1194	/* 64 bits : multicast address in hash table */
1195	netdev_hw_addr_list_for_each(ha, mcptr) {
1196		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
1197
1198		if (hash_val >= 32)
1199			hashhi |= 1 << (hash_val - 32);
1200		else
1201			hashlo |= 1 << hash_val;
1202	}
1203
1204	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
1205	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
1206
1207	spin_unlock_irqrestore(&pldat->lock, flags);
1208}
1209
1210static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1211{
1212	struct netdata_local *pldat = netdev_priv(ndev);
1213	struct phy_device *phydev = pldat->phy_dev;
1214
1215	if (!netif_running(ndev))
1216		return -EINVAL;
1217
1218	if (!phydev)
1219		return -ENODEV;
1220
1221	return phy_mii_ioctl(phydev, req, cmd);
1222}
1223
1224static int lpc_eth_open(struct net_device *ndev)
1225{
1226	struct netdata_local *pldat = netdev_priv(ndev);
1227
1228	if (netif_msg_ifup(pldat))
1229		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
1230
1231	if (!is_valid_ether_addr(ndev->dev_addr))
1232		return -EADDRNOTAVAIL;
1233
1234	__lpc_eth_clock_enable(pldat, true);
1235
1236	/* Reset and initialize */
1237	__lpc_eth_reset(pldat);
1238	__lpc_eth_init(pldat);
1239
1240	/* schedule a link state check */
1241	phy_start(pldat->phy_dev);
1242	netif_start_queue(ndev);
1243	napi_enable(&pldat->napi);
1244
1245	return 0;
1246}
1247
1248/*
1249 * Ethtool ops
1250 */
1251static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
1252	struct ethtool_drvinfo *info)
1253{
1254	strcpy(info->driver, MODNAME);
1255	strcpy(info->version, DRV_VERSION);
1256	strcpy(info->bus_info, dev_name(ndev->dev.parent));
1257}
1258
1259static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1260{
1261	struct netdata_local *pldat = netdev_priv(ndev);
1262
1263	return pldat->msg_enable;
1264}
1265
1266static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
1267{
1268	struct netdata_local *pldat = netdev_priv(ndev);
1269
1270	pldat->msg_enable = level;
1271}
1272
1273static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
1274	struct ethtool_cmd *cmd)
1275{
1276	struct netdata_local *pldat = netdev_priv(ndev);
1277	struct phy_device *phydev = pldat->phy_dev;
1278
1279	if (!phydev)
1280		return -EOPNOTSUPP;
1281
1282	return phy_ethtool_gset(phydev, cmd);
1283}
1284
1285static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
1286	struct ethtool_cmd *cmd)
1287{
1288	struct netdata_local *pldat = netdev_priv(ndev);
1289	struct phy_device *phydev = pldat->phy_dev;
1290
1291	if (!phydev)
1292		return -EOPNOTSUPP;
1293
1294	return phy_ethtool_sset(phydev, cmd);
1295}
1296
/* ethtool operations; link settings are delegated to the PHY layer */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
	.get_settings	= lpc_eth_ethtool_getsettings,
	.set_settings	= lpc_eth_ethtool_setsettings,
	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
	.get_link	= ethtool_op_get_link,
};
1305
/* Network device operations implemented by this driver */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open		= lpc_eth_open,
	.ndo_stop		= lpc_eth_close,
	.ndo_start_xmit		= lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
	.ndo_do_ioctl		= lpc_eth_ioctl,
	.ndo_set_mac_address	= lpc_set_mac_address,
};
1314
1315static int lpc_eth_drv_probe(struct platform_device *pdev)
1316{
1317	struct resource *res;
1318	struct resource *dma_res;
1319	struct net_device *ndev;
1320	struct netdata_local *pldat;
1321	struct phy_device *phydev;
1322	dma_addr_t dma_handle;
1323	int irq, ret;
1324
1325	/* Get platform resources */
1326	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1327	dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1328	irq = platform_get_irq(pdev, 0);
1329	if ((!res) || (!dma_res) || (irq < 0) || (irq >= NR_IRQS)) {
1330		dev_err(&pdev->dev, "error getting resources.\n");
1331		ret = -ENXIO;
1332		goto err_exit;
1333	}
1334
1335	/* Allocate net driver data structure */
1336	ndev = alloc_etherdev(sizeof(struct netdata_local));
1337	if (!ndev) {
1338		dev_err(&pdev->dev, "could not allocate device.\n");
1339		ret = -ENOMEM;
1340		goto err_exit;
1341	}
1342
1343	SET_NETDEV_DEV(ndev, &pdev->dev);
1344
1345	pldat = netdev_priv(ndev);
1346	pldat->pdev = pdev;
1347	pldat->ndev = ndev;
1348
1349	spin_lock_init(&pldat->lock);
1350
1351	/* Save resources */
1352	ndev->irq = irq;
1353
1354	/* Get clock for the device */
1355	pldat->clk = clk_get(&pdev->dev, NULL);
1356	if (IS_ERR(pldat->clk)) {
1357		dev_err(&pdev->dev, "error getting clock.\n");
1358		ret = PTR_ERR(pldat->clk);
1359		goto err_out_free_dev;
1360	}
1361
1362	/* Enable network clock */
1363	__lpc_eth_clock_enable(pldat, true);
1364
1365	/* Map IO space */
1366	pldat->net_base = ioremap(res->start, res->end - res->start + 1);
1367	if (!pldat->net_base) {
1368		dev_err(&pdev->dev, "failed to map registers\n");
1369		ret = -ENOMEM;
1370		goto err_out_disable_clocks;
1371	}
1372	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
1373			  ndev->name, ndev);
1374	if (ret) {
1375		dev_err(&pdev->dev, "error requesting interrupt.\n");
1376		goto err_out_iounmap;
1377	}
1378
1379	/* Fill in the fields of the device structure with ethernet values. */
1380	ether_setup(ndev);
1381
1382	/* Setup driver functions */
1383	ndev->netdev_ops = &lpc_netdev_ops;
1384	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
1385	ndev->watchdog_timeo = msecs_to_jiffies(2500);
1386
1387	/* Get size of DMA buffers/descriptors region */
1388	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
1389		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
1390	pldat->dma_buff_base_v = 0;
1391
1392	if (use_iram_for_net()) {
1393		dma_handle = dma_res->start;
1394		if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
1395			pldat->dma_buff_base_v =
1396				io_p2v(dma_res->start);
1397		else
1398			netdev_err(ndev,
1399				"IRAM not big enough for net buffers, using SDRAM instead.\n");
1400	}
1401
1402	if (pldat->dma_buff_base_v == 0) {
1403		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
1404
1405		/* Allocate a chunk of memory for the DMA ethernet buffers
1406		   and descriptors */
1407		pldat->dma_buff_base_v =
1408			dma_alloc_coherent(&pldat->pdev->dev,
1409					   pldat->dma_buff_size, &dma_handle,
1410					   GFP_KERNEL);
1411
1412		if (pldat->dma_buff_base_v == NULL) {
1413			dev_err(&pdev->dev, "error getting DMA region.\n");
1414			ret = -ENOMEM;
1415			goto err_out_free_irq;
1416		}
1417	}
1418	pldat->dma_buff_base_p = dma_handle;
1419
1420	netdev_dbg(ndev, "IO address start     :0x%08x\n",
1421			res->start);
1422	netdev_dbg(ndev, "IO address size      :%d\n",
1423			res->end - res->start + 1);
1424	netdev_err(ndev, "IO address (mapped)  :0x%p\n",
1425			pldat->net_base);
1426	netdev_dbg(ndev, "IRQ number           :%d\n", ndev->irq);
1427	netdev_dbg(ndev, "DMA buffer size      :%d\n", pldat->dma_buff_size);
1428	netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
1429			pldat->dma_buff_base_p);
1430	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
1431			pldat->dma_buff_base_v);
1432
1433	/* Get MAC address from current HW setting (POR state is all zeros) */
1434	__lpc_get_mac(pldat, ndev->dev_addr);
1435
1436#ifdef CONFIG_OF_NET
1437	if (!is_valid_ether_addr(ndev->dev_addr)) {
1438		const char *macaddr = of_get_mac_address(pdev->dev.of_node);
1439		if (macaddr)
1440			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
1441	}
1442#endif
1443	if (!is_valid_ether_addr(ndev->dev_addr))
1444		eth_hw_addr_random(ndev);
1445
1446	/* Reset the ethernet controller */
1447	__lpc_eth_reset(pldat);
1448
1449	/* then shut everything down to save power */
1450	__lpc_eth_shutdown(pldat);
1451
1452	/* Set default parameters */
1453	pldat->msg_enable = NETIF_MSG_LINK;
1454
1455	/* Force an MII interface reset and clock setup */
1456	__lpc_mii_mngt_reset(pldat);
1457
1458	/* Force default PHY interface setup in chip, this will probably be
1459	   changed by the PHY driver */
1460	pldat->link = 0;
1461	pldat->speed = 100;
1462	pldat->duplex = DUPLEX_FULL;
1463	__lpc_params_setup(pldat);
1464
1465	netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
1466
1467	ret = register_netdev(ndev);
1468	if (ret) {
1469		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1470		goto err_out_dma_unmap;
1471	}
1472	platform_set_drvdata(pdev, ndev);
1473
1474	if (lpc_mii_init(pldat) != 0)
1475		goto err_out_unregister_netdev;
1476
1477	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
1478	       res->start, ndev->irq);
1479
1480	phydev = pldat->phy_dev;
1481
1482	device_init_wakeup(&pdev->dev, 1);
1483	device_set_wakeup_enable(&pdev->dev, 0);
1484
1485	return 0;
1486
1487err_out_unregister_netdev:
1488	platform_set_drvdata(pdev, NULL);
1489	unregister_netdev(ndev);
1490err_out_dma_unmap:
1491	if (!use_iram_for_net() ||
1492	    pldat->dma_buff_size > lpc32xx_return_iram_size())
1493		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
1494				  pldat->dma_buff_base_v,
1495				  pldat->dma_buff_base_p);
1496err_out_free_irq:
1497	free_irq(ndev->irq, ndev);
1498err_out_iounmap:
1499	iounmap(pldat->net_base);
1500err_out_disable_clocks:
1501	clk_disable(pldat->clk);
1502	clk_put(pldat->clk);
1503err_out_free_dev:
1504	free_netdev(ndev);
1505err_exit:
1506	pr_err("%s: not found (%d).\n", MODNAME, ret);
1507	return ret;
1508}
1509
/*
 * Platform removal: tear down in reverse probe order.  The DMA region
 * is only freed with dma_free_coherent() when it was not carved out
 * of on-chip IRAM (IRAM mappings come from io_p2v() in probe and were
 * never allocated through the DMA API).
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	if (!use_iram_for_net() ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_free(pldat->mii_bus);
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1532
1533#ifdef CONFIG_PM
1534static int lpc_eth_drv_suspend(struct platform_device *pdev,
1535	pm_message_t state)
1536{
1537	struct net_device *ndev = platform_get_drvdata(pdev);
1538	struct netdata_local *pldat = netdev_priv(ndev);
1539
1540	if (device_may_wakeup(&pdev->dev))
1541		enable_irq_wake(ndev->irq);
1542
1543	if (ndev) {
1544		if (netif_running(ndev)) {
1545			netif_device_detach(ndev);
1546			__lpc_eth_shutdown(pldat);
1547			clk_disable(pldat->clk);
1548
1549			/*
1550			 * Reset again now clock is disable to be sure
1551			 * EMC_MDC is down
1552			 */
1553			__lpc_eth_reset(pldat);
1554		}
1555	}
1556
1557	return 0;
1558}
1559
1560static int lpc_eth_drv_resume(struct platform_device *pdev)
1561{
1562	struct net_device *ndev = platform_get_drvdata(pdev);
1563	struct netdata_local *pldat;
1564
1565	if (device_may_wakeup(&pdev->dev))
1566		disable_irq_wake(ndev->irq);
1567
1568	if (ndev) {
1569		if (netif_running(ndev)) {
1570			pldat = netdev_priv(ndev);
1571
1572			/* Enable interface clock */
1573			clk_enable(pldat->clk);
1574
1575			/* Reset and initialize */
1576			__lpc_eth_reset(pldat);
1577			__lpc_eth_init(pldat);
1578
1579			netif_device_attach(ndev);
1580		}
1581	}
1582
1583	return 0;
1584}
1585#endif
1586
/* Platform driver glue; legacy suspend/resume hooks only with CONFIG_PM */
static struct platform_driver lpc_eth_driver = {
	.probe		= lpc_eth_drv_probe,
	.remove		= __devexit_p(lpc_eth_drv_remove),
#ifdef CONFIG_PM
	.suspend	= lpc_eth_drv_suspend,
	.resume		= lpc_eth_drv_resume,
#endif
	.driver		= {
		.name	= MODNAME,
	},
};
1598
/* Register the platform driver and provide module metadata */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");
1605