/* sh_eth.c, revision c0013f6f8bbcb7605d591431444780d636dbe223 */
/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2012 Renesas Solutions Corp.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};

#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_ARCH_R8A7740)
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not set up; defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
#endif

/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned int bits = ECMR_RTM;

#if defined(CONFIG_ARCH_R8A7779)
	bits |= ECMR_ELB;
#endif

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex		= sh_eth_set_duplex,
	.set_rate		= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp);
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
				EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			goto out;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
				EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
				EDMR);
	}

out:
	return ret;
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex_giga,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
	.hw_crc     = 1,
	.select_mii = 1,
#endif
};

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

	/* Reset HW CRC register */
	sh_eth_reset_hw_crc(ndev);

	/* Select MII mode */
	if (sh_eth_my_cpu_data.select_mii)
		sh_eth_select_mii(ndev);
out:
	return ret;
}

static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
	if (sh_eth_my_cpu_data.hw_crc)
		sh_eth_write(ndev, 0x0, CSMR);
}

#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

static int sh_eth_reset(struct net_device *ndev)
{
	int ret = 0;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);

	ret = sh_eth_check_reset(ndev);
	if (ret)
		goto out;

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

out:
	return ret;
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* R8A7740 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static int sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);

	return 0;
}
#else
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
#endif

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif


/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
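/*
 * Note: the EDMAC's descriptor endianness is a property of the SoC (set via
 * platform data in mdp->edmac_endian) and need not match the CPU's byte
 * order, so every descriptor word passes through the helpers above.
 */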

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/*
 * Get MAC address from SuperH MAC address register
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was programmed by the
 * bootloader (U-Boot or sh-ipl+g), so a MAC address must be set there
 * before this device is used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};
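/*
 * All four bit-banged MDIO lines live in a single register (typically PIR):
 * mmd_msk selects the data-direction bit, mdo_msk/mdi_msk the data
 * output/input bits, and mdc_msk the clock bit.  set_gate, when set,
 * switches a gate register before the pins are touched.
 */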

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
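	/*
	 * Example: an MTU of 1500 gives (1500 + 26 + 7) & ~7 = 1528, plus
	 * 2 + 16, for an rx_buf_sz of 1546 bytes.
	 */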
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		goto out;

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

out:
	return ret;
}

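/*
 * Ring bookkeeping: cur_tx is the index of the next descriptor the driver
 * will fill, while dirty_tx trails it, pointing at the oldest descriptor
 * that may still be owned by the hardware; the ring is idle when the two
 * indices are equal.
 */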
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

#if defined(CONFIG_ARCH_R8A7740)
		desc_status >>= 16;
#endif

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
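		/*
		 * RDFAR - RDLAR is the byte offset of the descriptor on which
		 * the DMAC stopped; each descriptor appears to be 16 bytes,
		 * hence the shift by 4 to turn the offset into a ring index.
		 */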
		if (intr_status & EESR_RDE)
			mdp->cur_rx = mdp->dirty_rx =
				(sh_eth_read(ndev, RDFAR) -
				 sh_eth_read(ndev, RDLAR)) >> 4;
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Unused write-back interrupt */
		if (intr_status & EESR_TABT) {
			/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv */
			EESR_RMAF | /* Multicast address recv */
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv */
			EESR_RTSF | /* Short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)) { /* Recv frame CRC error */
		sh_eth_rx(ndev, intr_status);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
			struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
					sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}

static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx and Tx queues. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffers */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}

static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
			ndev->name, (int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

2061#if defined(SH_ETH_HAS_TSU)
2062/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields. */
2063static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2064					    int entry)
2065{
2066	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2067}
2068
2069static u32 sh_eth_tsu_get_post_mask(int entry)
2070{
2071	return 0x0f << (28 - ((entry % 8) * 4));
2072}
2073
2074static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2075{
2076	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2077}
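
/*
 * Worked example of the POST layout encoded by the helpers above:
 * each TSU_POSTn register packs eight CAM entries, four bits apiece,
 * with the lowest-numbered entry in the top nibble.  Entry 10 on
 * port 1 therefore lands in TSU_POST2 (10 / 8 == 1), in the nibble at
 * bits 20-23 (28 - (10 % 8) * 4 == 20), with a port bit of
 * 0x02 << 20 (0x08 >> (1 << 1)).
 */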
2078
2079static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2080					     int entry)
2081{
2082	struct sh_eth_private *mdp = netdev_priv(ndev);
2083	u32 tmp;
2084	void *reg_offset;
2085
2086	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2087	tmp = ioread32(reg_offset);
2088	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2089}
2090
2091static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2092					      int entry)
2093{
2094	struct sh_eth_private *mdp = netdev_priv(ndev);
2095	u32 post_mask, ref_mask, tmp;
2096	void *reg_offset;
2097
2098	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2099	post_mask = sh_eth_tsu_get_post_mask(entry);
2100	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2101
2102	tmp = ioread32(reg_offset);
2103	iowrite32(tmp & ~post_mask, reg_offset);
2104
2105	/* Return true if the entry is still enabled on the other port */
2106	return tmp & ref_mask;
2107}
2108
2109static int sh_eth_tsu_busy(struct net_device *ndev)
2110{
2111	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2112	struct sh_eth_private *mdp = netdev_priv(ndev);
2113
2114	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2115		udelay(10);
2116		timeout--;
2117		if (timeout <= 0) {
2118			dev_err(&ndev->dev, "%s: timeout\n", __func__);
2119			return -ETIMEDOUT;
2120		}
2121	}
2122
2123	return 0;
2124}
2125
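/*
 * A CAM entry spans two registers: the high word holds the first four
 * MAC address bytes, the low word the remaining two.  The TSU raises
 * its ADSBSY flag while it latches each word, so the flag is polled
 * after every write.
 */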
2126static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2127				  const u8 *addr)
2128{
2129	u32 val;
2130
2131	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2132	iowrite32(val, reg);
2133	if (sh_eth_tsu_busy(ndev) < 0)
2134		return -EBUSY;
2135
2136	val = addr[4] << 8 | addr[5];
2137	iowrite32(val, reg + 4);
2138	if (sh_eth_tsu_busy(ndev) < 0)
2139		return -EBUSY;
2140
2141	return 0;
2142}
2143
2144static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2145{
2146	u32 val;
2147
2148	val = ioread32(reg);
2149	addr[0] = (val >> 24) & 0xff;
2150	addr[1] = (val >> 16) & 0xff;
2151	addr[2] = (val >> 8) & 0xff;
2152	addr[3] = val & 0xff;
2153	val = ioread32(reg + 4);
2154	addr[4] = (val >> 8) & 0xff;
2155	addr[5] = val & 0xff;
2156}
2157
2159static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2160{
2161	struct sh_eth_private *mdp = netdev_priv(ndev);
2162	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2163	int i;
2164	u8 c_addr[ETH_ALEN];
2165
2166	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2167		sh_eth_tsu_read_entry(reg_offset, c_addr);
2168		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
2169			return i;
2170	}
2171
2172	return -ENOENT;
2173}
2174
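/*
 * An all-zero MAC address marks a free CAM slot, so finding an empty
 * entry is just a lookup for the blank address.
 */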
2175static int sh_eth_tsu_find_empty(struct net_device *ndev)
2176{
2177	u8 blank[ETH_ALEN];
2178	int entry;
2179
2180	memset(blank, 0, sizeof(blank));
2181	entry = sh_eth_tsu_find_entry(ndev, blank);
2182	return (entry < 0) ? -ENOMEM : entry;
2183}
2184
2185static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2186					      int entry)
2187{
2188	struct sh_eth_private *mdp = netdev_priv(ndev);
2189	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2190	int ret;
2191	u8 blank[ETH_ALEN];
2192
2193	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2194			 ~(1 << (31 - entry)), TSU_TEN);
2195
2196	memset(blank, 0, sizeof(blank));
2197	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2198	if (ret < 0)
2199		return ret;
2200	return 0;
2201}
2202
2203static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2204{
2205	struct sh_eth_private *mdp = netdev_priv(ndev);
2206	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2207	int i, ret;
2208
2209	if (!mdp->cd->tsu)
2210		return 0;
2211
2212	i = sh_eth_tsu_find_entry(ndev, addr);
2213	if (i < 0) {
2214		/* No entry found, create one */
2215		i = sh_eth_tsu_find_empty(ndev);
2216		if (i < 0)
2217			return -ENOMEM;
2218		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2219		if (ret < 0)
2220			return ret;
2221
2222		/* Enable the entry */
2223		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2224				 (1 << (31 - i)), TSU_TEN);
2225	}
2226
2227	/* Entry found or created, enable POST */
2228	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2229
2230	return 0;
2231}
2232
2233static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2234{
2235	struct sh_eth_private *mdp = netdev_priv(ndev);
2236	int i, ret;
2237
2238	if (!mdp->cd->tsu)
2239		return 0;
2240
2241	i = sh_eth_tsu_find_entry(ndev, addr);
2242	if (i >= 0) {
2243		/* Entry found */
2244		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2245			goto done;
2246
2247		/* Disable the entry if both ports were disabled */
2248		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2249		if (ret < 0)
2250			return ret;
2251	}
2252done:
2253	return 0;
2254}
2255
2256static int sh_eth_tsu_purge_all(struct net_device *ndev)
2257{
2258	struct sh_eth_private *mdp = netdev_priv(ndev);
2259	int i, ret;
2260
2261	if (unlikely(!mdp->cd->tsu))
2262		return 0;
2263
2264	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2265		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2266			continue;
2267
2268		/* Disable the entry if both ports were disabled */
2269		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2270		if (ret < 0)
2271			return ret;
2272	}
2273
2274	return 0;
2275}
2276
2277static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2278{
2279	struct sh_eth_private *mdp = netdev_priv(ndev);
2280	u8 addr[ETH_ALEN];
2281	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2282	int i;
2283
2284	if (unlikely(!mdp->cd->tsu))
2285		return;
2286
2287	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2288		sh_eth_tsu_read_entry(reg_offset, addr);
2289		if (is_multicast_ether_addr(addr))
2290			sh_eth_tsu_del_entry(ndev, addr);
2291	}
2292}
2293
2294/* Set up the RX mode: promiscuous, all-multicast, or TSU-filtered multicast */
2295static void sh_eth_set_multicast_list(struct net_device *ndev)
2296{
2297	struct sh_eth_private *mdp = netdev_priv(ndev);
2298	u32 ecmr_bits;
2299	int mcast_all = 0;
2300	unsigned long flags;
2301
2302	spin_lock_irqsave(&mdp->lock, flags);
2303	/*
2304	 * Initial condition is MCT = 1, PRM = 0.
2305	 * Depending on ndev->flags, set PRM or clear MCT
2306	 */
2307	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2308
2309	if (!(ndev->flags & IFF_MULTICAST)) {
2310		sh_eth_tsu_purge_mcast(ndev);
2311		mcast_all = 1;
2312	}
2313	if (ndev->flags & IFF_ALLMULTI) {
2314		sh_eth_tsu_purge_mcast(ndev);
2315		ecmr_bits &= ~ECMR_MCT;
2316		mcast_all = 1;
2317	}
2318
2319	if (ndev->flags & IFF_PROMISC) {
2320		sh_eth_tsu_purge_all(ndev);
2321		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2322	} else if (mdp->cd->tsu) {
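		/*
		 * With a TSU, accept multicast only from addresses loaded
		 * into the CAM.  If the table overflows, fall back to
		 * accepting all multicast frames by clearing ECMR_MCT.
		 */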
2323		struct netdev_hw_addr *ha;
2324		netdev_for_each_mc_addr(ha, ndev) {
2325			if (mcast_all && is_multicast_ether_addr(ha->addr))
2326				continue;
2327
2328			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2329				if (!mcast_all) {
2330					sh_eth_tsu_purge_mcast(ndev);
2331					ecmr_bits &= ~ECMR_MCT;
2332					mcast_all = 1;
2333				}
2334			}
2335		}
2336	} else {
2337		/* Normal, unicast/broadcast-only mode. */
2338		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2339	}
2340
2341	/* update the ethernet mode */
2342	sh_eth_write(ndev, ecmr_bits, ECMR);
2343
2344	spin_unlock_irqrestore(&mdp->lock, flags);
2345}
2346
2347static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2348{
2349	if (!mdp->port)
2350		return TSU_VTAG0;
2351	else
2352		return TSU_VTAG1;
2353}
2354
2355static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2356{
2357	struct sh_eth_private *mdp = netdev_priv(ndev);
2358	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2359
2360	if (unlikely(!mdp->cd->tsu))
2361		return -EPERM;
2362
2363	/* No filtering if vid = 0 */
2364	if (!vid)
2365		return 0;
2366
2367	mdp->vlan_num_ids++;
2368
2369	/*
2370	 * The controller has one VLAN tag HW filter. So, if the filter is
2371	 * already in use, the driver disables it and lets all VLAN IDs pass.
2372	 */
2373	if (mdp->vlan_num_ids > 1) {
2374		/* disable VLAN filter */
2375		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2376		return 0;
2377	}
2378
2379	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2380			 vtag_reg_index);
2381
2382	return 0;
2383}
2384
2385static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2386{
2387	struct sh_eth_private *mdp = netdev_priv(ndev);
2388	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2389
2390	if (unlikely(!mdp->cd->tsu))
2391		return -EPERM;
2392
2393	/* No filtering if vid = 0 */
2394	if (!vid)
2395		return 0;
2396
2397	mdp->vlan_num_ids--;
2398	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2399
2400	return 0;
2401}
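
/*
 * Note that once a second VLAN ID has disabled the single HW filter,
 * deleting IDs never re-arms it; filtering resumes only when the
 * count drops to zero and a first ID is added again.
 */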
2402#endif /* SH_ETH_HAS_TSU */
2403
2404/* SuperH's TSU register init function */
2405static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2406{
2407	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
2408	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
2409	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
2410	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2411	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2412	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2413	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2414	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2415	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2416	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2417	if (sh_eth_is_gether(mdp)) {
2418		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
2419		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
2420	} else {
2421		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
2422		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
2423	}
2424	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* clear all interrupt status */
2425	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* disable all interrupts */
2426	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* disable all CAM entries */
2427	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
2428	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
2429	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
2430	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
2431}
2432
2433/* MDIO bus release function */
2434static int sh_mdio_release(struct net_device *ndev)
2435{
2436	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2437
2438	/* unregister mdio bus */
2439	mdiobus_unregister(bus);
2440
2441	/* remove mdio bus info from net_device */
2442	dev_set_drvdata(&ndev->dev, NULL);
2443
2444	/* free bitbang info */
2445	free_mdio_bitbang(bus);
2446
2447	return 0;
2448}
2449
2450/* MDIO bus init function */
2451static int sh_mdio_init(struct net_device *ndev, int id,
2452			struct sh_eth_plat_data *pd)
2453{
2454	int ret, i;
2455	struct bb_info *bitbang;
2456	struct sh_eth_private *mdp = netdev_priv(ndev);
2457
2458	/* create bit control struct for PHY */
2459	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
2460			       GFP_KERNEL);
2461	if (!bitbang) {
2462		ret = -ENOMEM;
2463		goto out;
2464	}
2465
2466	/* bitbang init */
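	/*
	 * MDIO is bit-banged through the PIR register: MDC clocks the
	 * bus, MDO and MMD drive the data line and its direction, and
	 * MDI samples the PHY's output.  The generic mdio-bitbang layer
	 * supplies the MII framing on top of these accessors.
	 */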
2467	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2468	bitbang->set_gate = pd->set_mdio_gate;
2469	bitbang->mdi_msk = PIR_MDI;
2470	bitbang->mdo_msk = PIR_MDO;
2471	bitbang->mmd_msk = PIR_MMD;
2472	bitbang->mdc_msk = PIR_MDC;
2473	bitbang->ctrl.ops = &bb_ops;
2474
2475	/* MII controller setting */
2476	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2477	if (!mdp->mii_bus) {
2478		ret = -ENOMEM;
2479		goto out;
2480	}
2481
2482	/* Hook up MII support for ethtool */
2483	mdp->mii_bus->name = "sh_mii";
2484	mdp->mii_bus->parent = &ndev->dev;
2485	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2486		mdp->pdev->name, id);
2487
2488	/* PHY IRQ */
2489	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
2490					 sizeof(int) * PHY_MAX_ADDR,
2491					 GFP_KERNEL);
2492	if (!mdp->mii_bus->irq) {
2493		ret = -ENOMEM;
2494		goto out_free_bus;
2495	}
2496
2497	for (i = 0; i < PHY_MAX_ADDR; i++)
2498		mdp->mii_bus->irq[i] = PHY_POLL;
2499
2500	/* register mdio bus */
2501	ret = mdiobus_register(mdp->mii_bus);
2502	if (ret)
2503		goto out_free_bus;
2504
2505	dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2506
2507	return 0;
2508
2509out_free_bus:
2510	free_mdio_bitbang(mdp->mii_bus);
2511
2512out:
2513	return ret;
2514}
2515
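/*
 * The register layout differs between the gigabit (GETHER), fast-SH4
 * and SH3/SH2 variants, so the platform's register_type selects the
 * offset table used to translate register enums into MMIO offsets.
 */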
2516static const u16 *sh_eth_get_register_offset(int register_type)
2517{
2518	const u16 *reg_offset = NULL;
2519
2520	switch (register_type) {
2521	case SH_ETH_REG_GIGABIT:
2522		reg_offset = sh_eth_offset_gigabit;
2523		break;
2524	case SH_ETH_REG_FAST_SH4:
2525		reg_offset = sh_eth_offset_fast_sh4;
2526		break;
2527	case SH_ETH_REG_FAST_SH3_SH2:
2528		reg_offset = sh_eth_offset_fast_sh3_sh2;
2529		break;
2530	default:
2531		pr_err("Unknown register type (%d)\n", register_type);
2532		break;
2533	}
2534
2535	return reg_offset;
2536}
2537
2538static const struct net_device_ops sh_eth_netdev_ops = {
2539	.ndo_open		= sh_eth_open,
2540	.ndo_stop		= sh_eth_close,
2541	.ndo_start_xmit		= sh_eth_start_xmit,
2542	.ndo_get_stats		= sh_eth_get_stats,
2543#if defined(SH_ETH_HAS_TSU)
2544	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
2545	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
2546	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
2547#endif
2548	.ndo_tx_timeout		= sh_eth_tx_timeout,
2549	.ndo_do_ioctl		= sh_eth_do_ioctl,
2550	.ndo_validate_addr	= eth_validate_addr,
2551	.ndo_set_mac_address	= eth_mac_addr,
2552	.ndo_change_mtu		= eth_change_mtu,
2553};
2554
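/*
 * The probe path below consumes sh_eth_plat_data supplied by board
 * code.  A minimal sketch of such platform data, with illustrative
 * (assumed, not board-accurate) values:
 *
 *	static struct sh_eth_plat_data my_eth_pdata = {
 *		.phy		= 0,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *	};
 *
 * The device is then registered with platform_device_register() and
 * the structure is passed via dev.platform_data.
 */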
2555static int sh_eth_drv_probe(struct platform_device *pdev)
2556{
2557	int ret, devno = 0;
2558	struct resource *res;
2559	struct net_device *ndev = NULL;
2560	struct sh_eth_private *mdp = NULL;
2561	struct sh_eth_plat_data *pd = pdev->dev.platform_data;
2562
2563	/* get base addr */
2564	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2565	if (unlikely(res == NULL)) {
2566		dev_err(&pdev->dev, "invalid resource\n");
2567		ret = -EINVAL;
2568		goto out;
2569	}
2570
2571	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2572	if (!ndev) {
2573		ret = -ENOMEM;
2574		goto out;
2575	}
2576
2577	/* The sh Ether-specific entries in the device structure. */
2578	ndev->base_addr = res->start;
2579	devno = pdev->id;
2580	if (devno < 0)
2581		devno = 0;
2582
2583	ndev->dma = -1;
2584	ret = platform_get_irq(pdev, 0);
2585	if (ret < 0) {
2586		ret = -ENODEV;
2587		goto out_release;
2588	}
2589	ndev->irq = ret;
2590
2591	SET_NETDEV_DEV(ndev, &pdev->dev);
2592
2593	/* Fill in the fields of the device structure with ethernet values. */
2594	ether_setup(ndev);
2595
2596	mdp = netdev_priv(ndev);
2597	mdp->num_tx_ring = TX_RING_SIZE;
2598	mdp->num_rx_ring = RX_RING_SIZE;
2599	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2600	if (IS_ERR(mdp->addr)) {
2601		ret = PTR_ERR(mdp->addr);
2602		goto out_release;
2603	}
2604
2605	spin_lock_init(&mdp->lock);
2606	mdp->pdev = pdev;
2607	pm_runtime_enable(&pdev->dev);
2608	pm_runtime_resume(&pdev->dev);
2609
2610	/* get PHY ID */
2611	mdp->phy_id = pd->phy;
2612	mdp->phy_interface = pd->phy_interface;
2613	/* EDMAC endian */
2614	mdp->edmac_endian = pd->edmac_endian;
2615	mdp->no_ether_link = pd->no_ether_link;
2616	mdp->ether_link_active_low = pd->ether_link_active_low;
2617	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
2618
2619	/* set cpu data */
2620#if defined(SH_ETH_HAS_BOTH_MODULES)
2621	mdp->cd = sh_eth_get_cpu_data(mdp);
2622#else
2623	mdp->cd = &sh_eth_my_cpu_data;
2624#endif
2625	sh_eth_set_default_cpu_data(mdp->cd);
2626
2627	/* set function */
2628	ndev->netdev_ops = &sh_eth_netdev_ops;
2629	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2630	ndev->watchdog_timeo = TX_TIMEOUT;
2631
2632	/* debug message level */
2633	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2634
2635	/* read and set MAC address */
2636	read_mac_address(ndev, pd->mac_addr);
2637
2638	/* ioremap the TSU registers */
2639	if (mdp->cd->tsu) {
2640		struct resource *rtsu;
2641		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2642		if (!rtsu) {
2643			dev_err(&pdev->dev, "TSU resource not found\n");
2644			ret = -ENODEV;
2645			goto out_release;
2646		}
2647		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2648		if (IS_ERR(mdp->tsu_addr)) {
2649			ret = PTR_ERR(mdp->tsu_addr);
2650			goto out_release;
2651		}
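		/*
		 * Two Ether channels share one TSU; the even/odd device
		 * number selects which per-port TSU resources (POST bits,
		 * VTAG register) this instance owns.
		 */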
2652		mdp->port = devno % 2;
2653		ndev->features = NETIF_F_HW_VLAN_FILTER;
2654	}
2655
2656	/* initialize the first device, and any device flagged via needs_init */
2657	if (!devno || pd->needs_init) {
2658		if (mdp->cd->chip_reset)
2659			mdp->cd->chip_reset(ndev);
2660
2661		if (mdp->cd->tsu) {
2662			/* TSU init (init only) */
2663			sh_eth_tsu_init(mdp);
2664		}
2665	}
2666
2667	/* network device register */
2668	ret = register_netdev(ndev);
2669	if (ret)
2670		goto out_release;
2671
2672	/* mdio bus init */
2673	ret = sh_mdio_init(ndev, pdev->id, pd);
2674	if (ret)
2675		goto out_unregister;
2676
2677	/* print device information */
2678	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
2679	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2680
2681	platform_set_drvdata(pdev, ndev);
2682
2683	return ret;
2684
2685out_unregister:
2686	unregister_netdev(ndev);
2687
2688out_release:
2689	/* net_dev free */
2690	if (ndev)
2691		free_netdev(ndev);
2692
2693out:
2694	return ret;
2695}
2696
2697static int sh_eth_drv_remove(struct platform_device *pdev)
2698{
2699	struct net_device *ndev = platform_get_drvdata(pdev);
2700
2701	sh_mdio_release(ndev);
2702	unregister_netdev(ndev);
2703	pm_runtime_disable(&pdev->dev);
2704	free_netdev(ndev);
2705	platform_set_drvdata(pdev, NULL);
2706
2707	return 0;
2708}
2709
2710static int sh_eth_runtime_nop(struct device *dev)
2711{
2712	/*
2713	 * Runtime PM callback shared between ->runtime_suspend()
2714	 * and ->runtime_resume(). Simply returns success.
2715	 *
2716	 * This driver re-initializes all registers after
2717	 * pm_runtime_get_sync() anyway so there is no need
2718	 * to save and restore registers here.
2719	 */
2720	return 0;
2721}
2722
2723static const struct dev_pm_ops sh_eth_dev_pm_ops = {
2724	.runtime_suspend = sh_eth_runtime_nop,
2725	.runtime_resume = sh_eth_runtime_nop,
2726};
2727
2728static struct platform_driver sh_eth_driver = {
2729	.probe = sh_eth_drv_probe,
2730	.remove = sh_eth_drv_remove,
2731	.driver = {
2732		   .name = CARDNAME,
2733		   .pm = &sh_eth_dev_pm_ops,
2734	},
2735};
2736
2737module_platform_driver(sh_eth_driver);
2738
2739MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2740MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2741MODULE_LICENSE("GPL v2");
2742