sh_eth.c revision 55754f19d7ee4fa3633f55a4a084af8590c35efa
/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2013 Renesas Solutions Corp.
 *  Copyright (C) 2013 Cogent Embedded, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]	= 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};

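/* The GETHER (gigabit) variant is identified by the register map in use */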
static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not set up. Defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.hw_crc		= 1,
	.select_mii	= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.shift_rd0	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
}

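/* Poll the EDMR software-reset bits until they self-clear, giving up after ~100 ms */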
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

out:
	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/*
 * Get the MAC address from the SuperH MAC address registers.
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver reads the MAC address that was set up by the bootloader
 * (U-Boot or sh-ipl+g).  To use this device, you must set the MAC
 * address in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

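/* The transmission-request value written to EDTRR differs between GETHER and ETHER cores */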
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

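/* Per-bus state for bit-banging MDIO: each mask selects one control bit
 * (typically within the PIR register) of the register at 'addr'.
 */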
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

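/* Bring the controller out of reset and program the E-DMAC/E-MAC blocks;
 * when 'start' is true, also unmask interrupts and start reception.
 */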
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}

/* Packet receive function */
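/* Returns non-zero when the NAPI quota was exhausted before the Rx ring
 * was fully drained.
 */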
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;
	int exceeded = 0;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (*quota <= 0) {
			exceeded = 1;
			break;
		}
		(*quota)--;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/*
		 * On most GETHER/ETHER cores, the Receive Frame State (RFS)
		 * bits in Receive Descriptor 0 occupy bits 9 to 0.  On the
		 * R8A7740's GETHER, however, the RFS bits occupy bits 25 to
		 * 16, so the driver needs to shift the status right by 16.
		 */
		if (mdp->cd->shift_rd0)
			desc_status >>= 16;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE)
			mdp->cur_rx = mdp->dirty_rx =
				(sh_eth_read(ndev, RDFAR) -
				 sh_eth_read(ndev, RDLAR)) >> 4;
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return exceeded;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
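/* Handle E-MAC (ECSR) and E-DMAC (EESR) error events reported via the
 * interrupt status word.
 */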
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

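/* Top half: frees completed Tx, handles errors, and defers Rx work to NAPI */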
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	unsigned long intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
	 * always enabled since it's the one that comes through regardless of
	 * the mask, and we need to fully handle it in sh_eth_error() in order
	 * to quench it as it doesn't get cleared by just writing 1 to the ECI
	 * bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto other_irq;

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			dev_warn(&ndev->dev,
				 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
				 intr_status, intr_enable);
		}
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

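/* NAPI poll: drain received frames until the budget is spent or no Rx
 * events remain pending.
 */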
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	unsigned long intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
			struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
					sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}

static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	napi_enable(&mdp->napi);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
			" resetting...\n", ndev->name,
			(int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}

/* Packet transmit function */
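/* A few descriptors are kept in reserve so the queue is stopped before
 * the Tx ring can fill completely.
 */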
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	napi_disable(&mdp->napi);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
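/* Each TSU_POSTn register packs eight 4-bit fields, one per CAM entry,
 * with (it appears) one enable bit per port inside each field.
 */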
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
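
/* Worked example of the mapping above, for illustration only: each TSU_POSTn
 * register packs eight CAM entries, one 4-bit field per entry, with entry
 * (n % 8) == 0 in the top nibble.  Within a nibble, bit 3 is port 0's enable
 * bit and bit 1 is port 1's.  So for entry 10 the helpers yield TSU_POST2
 * (TSU_POST1 + 10 / 8 * 4), a field mask of 0x0f << 20, and an enable bit of
 * 0x08 << 20 (port 0) or 0x02 << 20 (port 1).
 */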

static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port also enables this entry, return "true" */
	return tmp & ref_mask;
}

static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
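
/* Note on the bound above: the loop polls TSU_ADSBSY every 10 us, i.e. 100
 * times per millisecond, so the total wait is capped at roughly
 * SH_ETH_TSU_TIMEOUT_MS milliseconds.
 */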

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}
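
/* For illustration, with the (hypothetical) address 00:11:22:33:44:55 the
 * function above writes 0x00112233 to the high word at 'reg' and 0x00004455
 * to the low word at 'reg + 4', polling TSU_ADSBSY after each write.
 */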

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}

static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}

static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i >= 0) {
		/* Entry found */
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			goto done;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}
done:
	return 0;
}

static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}

/* Set up multicast reception */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/*
	 * Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
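
/* Summary of the resulting ECMR mode bits, for illustration:
 *   default (TSU present)   MCT = 1, PRM = 0  (CAM-filtered multicast)
 *   IFF_ALLMULTI            MCT = 0, PRM = 0  (all multicast accepted)
 *   IFF_PROMISC             MCT = 0, PRM = 1  (everything accepted)
 *   no TSU                  MCT = 1, PRM = 0  (unicast/broadcast only)
 */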

static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/*
	 * The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and lets all VLAN tagged
	 * frames through, leaving the filtering to the stack.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}
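
/* For illustration: adding VID 5 as the first ID writes TSU_VTAG_ENABLE | 5
 * to the VTAG register; adding any second VID writes 0 instead, so HW
 * filtering is switched off and all tagged frames reach the stack.
 */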

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
			       GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
					 sizeof(int) * PHY_MAX_ADDR,
					 GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_bus;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out:
	return ret;
}

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = pdev->dev.platform_data;
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
	mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize the first device, or any device marked needs_init */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (only required once) */
			sh_eth_tsu_init(mdp);
		}
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_napi_del:
	netif_napi_del(&mdp->napi);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif

static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");