sh_eth.c revision 530aa2d0d9d55ab2775d47621ddf4b5b15bc1110
1/*  SuperH Ethernet device driver
2 *
3 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
4 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
5 *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
6 *  Copyright (C) 2014 Codethink Limited
7 *
8 *  This program is free software; you can redistribute it and/or modify it
9 *  under the terms and conditions of the GNU General Public License,
10 *  version 2, as published by the Free Software Foundation.
11 *
12 *  This program is distributed in the hope it will be useful, but WITHOUT
13 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 *  more details.
16 *
17 *  The full GNU General Public License is included in this distribution in
18 *  the file called "COPYING".
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/spinlock.h>
24#include <linux/interrupt.h>
25#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h>
27#include <linux/delay.h>
28#include <linux/platform_device.h>
29#include <linux/mdio-bitbang.h>
30#include <linux/netdevice.h>
31#include <linux/of.h>
32#include <linux/of_device.h>
33#include <linux/of_irq.h>
34#include <linux/of_net.h>
35#include <linux/phy.h>
36#include <linux/cache.h>
37#include <linux/io.h>
38#include <linux/pm_runtime.h>
39#include <linux/slab.h>
40#include <linux/ethtool.h>
41#include <linux/if_vlan.h>
42#include <linux/clk.h>
43#include <linux/sh_eth.h>
44#include <linux/of_mdio.h>
45
46#include "sh_eth.h"
47
48#define SH_ETH_DEF_MSG_ENABLE \
49		(NETIF_MSG_LINK	| \
50		NETIF_MSG_TIMER	| \
51		NETIF_MSG_RX_ERR| \
52		NETIF_MSG_TX_ERR)
53
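/* Register offset tables: each maps the common register enum from sh_eth.h
 * to one SoC family's MMIO offsets.  sh_eth_read()/sh_eth_write() index the
 * table through mdp->reg_offset, so a single code path serves every
 * supported controller variant.
 */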
54static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
55	[EDSR]		= 0x0000,
56	[EDMR]		= 0x0400,
57	[EDTRR]		= 0x0408,
58	[EDRRR]		= 0x0410,
59	[EESR]		= 0x0428,
60	[EESIPR]	= 0x0430,
61	[TDLAR]		= 0x0010,
62	[TDFAR]		= 0x0014,
63	[TDFXR]		= 0x0018,
64	[TDFFR]		= 0x001c,
65	[RDLAR]		= 0x0030,
66	[RDFAR]		= 0x0034,
67	[RDFXR]		= 0x0038,
68	[RDFFR]		= 0x003c,
69	[TRSCER]	= 0x0438,
70	[RMFCR]		= 0x0440,
71	[TFTR]		= 0x0448,
72	[FDR]		= 0x0450,
73	[RMCR]		= 0x0458,
74	[RPADIR]	= 0x0460,
75	[FCFTR]		= 0x0468,
76	[CSMR]		= 0x04E4,
77
78	[ECMR]		= 0x0500,
79	[ECSR]		= 0x0510,
80	[ECSIPR]	= 0x0518,
81	[PIR]		= 0x0520,
82	[PSR]		= 0x0528,
83	[PIPR]		= 0x052c,
84	[RFLR]		= 0x0508,
85	[APR]		= 0x0554,
86	[MPR]		= 0x0558,
87	[PFTCR]		= 0x055c,
88	[PFRCR]		= 0x0560,
89	[TPAUSER]	= 0x0564,
90	[GECMR]		= 0x05b0,
91	[BCULR]		= 0x05b4,
92	[MAHR]		= 0x05c0,
93	[MALR]		= 0x05c8,
94	[TROCR]		= 0x0700,
95	[CDCR]		= 0x0708,
96	[LCCR]		= 0x0710,
97	[CEFCR]		= 0x0740,
98	[FRECR]		= 0x0748,
99	[TSFRCR]	= 0x0750,
100	[TLFRCR]	= 0x0758,
101	[RFCR]		= 0x0760,
102	[CERCR]		= 0x0768,
103	[CEECR]		= 0x0770,
104	[MAFCR]		= 0x0778,
105	[RMII_MII]	= 0x0790,
106
107	[ARSTR]		= 0x0000,
108	[TSU_CTRST]	= 0x0004,
109	[TSU_FWEN0]	= 0x0010,
110	[TSU_FWEN1]	= 0x0014,
111	[TSU_FCM]	= 0x0018,
112	[TSU_BSYSL0]	= 0x0020,
113	[TSU_BSYSL1]	= 0x0024,
114	[TSU_PRISL0]	= 0x0028,
115	[TSU_PRISL1]	= 0x002c,
116	[TSU_FWSL0]	= 0x0030,
117	[TSU_FWSL1]	= 0x0034,
118	[TSU_FWSLC]	= 0x0038,
119	[TSU_QTAG0]	= 0x0040,
120	[TSU_QTAG1]	= 0x0044,
121	[TSU_FWSR]	= 0x0050,
122	[TSU_FWINMK]	= 0x0054,
123	[TSU_ADQT0]	= 0x0048,
124	[TSU_ADQT1]	= 0x004c,
125	[TSU_VTAG0]	= 0x0058,
126	[TSU_VTAG1]	= 0x005c,
127	[TSU_ADSBSY]	= 0x0060,
128	[TSU_TEN]	= 0x0064,
129	[TSU_POST1]	= 0x0070,
130	[TSU_POST2]	= 0x0074,
131	[TSU_POST3]	= 0x0078,
132	[TSU_POST4]	= 0x007c,
133	[TSU_ADRH0]	= 0x0100,
134	[TSU_ADRL0]	= 0x0104,
135	[TSU_ADRH31]	= 0x01f8,
136	[TSU_ADRL31]	= 0x01fc,
137
138	[TXNLCR0]	= 0x0080,
139	[TXALCR0]	= 0x0084,
140	[RXNLCR0]	= 0x0088,
141	[RXALCR0]	= 0x008c,
142	[FWNLCR0]	= 0x0090,
143	[FWALCR0]	= 0x0094,
144	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
146	[RXNLCR1]	= 0x00a8,
147	[RXALCR1]	= 0x00ac,
148	[FWNLCR1]	= 0x00b0,
149	[FWALCR1]	= 0x00b4,
150};
151
152static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
153	[EDSR]		= 0x0000,
154	[EDMR]		= 0x0400,
155	[EDTRR]		= 0x0408,
156	[EDRRR]		= 0x0410,
157	[EESR]		= 0x0428,
158	[EESIPR]	= 0x0430,
159	[TDLAR]		= 0x0010,
160	[TDFAR]		= 0x0014,
161	[TDFXR]		= 0x0018,
162	[TDFFR]		= 0x001c,
163	[RDLAR]		= 0x0030,
164	[RDFAR]		= 0x0034,
165	[RDFXR]		= 0x0038,
166	[RDFFR]		= 0x003c,
167	[TRSCER]	= 0x0438,
168	[RMFCR]		= 0x0440,
169	[TFTR]		= 0x0448,
170	[FDR]		= 0x0450,
171	[RMCR]		= 0x0458,
172	[RPADIR]	= 0x0460,
173	[FCFTR]		= 0x0468,
174	[CSMR]		= 0x04E4,
175
176	[ECMR]		= 0x0500,
177	[RFLR]		= 0x0508,
178	[ECSR]		= 0x0510,
179	[ECSIPR]	= 0x0518,
180	[PIR]		= 0x0520,
181	[APR]		= 0x0554,
182	[MPR]		= 0x0558,
183	[PFTCR]		= 0x055c,
184	[PFRCR]		= 0x0560,
185	[TPAUSER]	= 0x0564,
186	[MAHR]		= 0x05c0,
187	[MALR]		= 0x05c8,
188	[CEFCR]		= 0x0740,
189	[FRECR]		= 0x0748,
190	[TSFRCR]	= 0x0750,
191	[TLFRCR]	= 0x0758,
192	[RFCR]		= 0x0760,
193	[MAFCR]		= 0x0778,
194
195	[ARSTR]		= 0x0000,
196	[TSU_CTRST]	= 0x0004,
197	[TSU_VTAG0]	= 0x0058,
198	[TSU_ADSBSY]	= 0x0060,
199	[TSU_TEN]	= 0x0064,
200	[TSU_ADRH0]	= 0x0100,
201	[TSU_ADRL0]	= 0x0104,
202	[TSU_ADRH31]	= 0x01f8,
203	[TSU_ADRL31]	= 0x01fc,
204
205	[TXNLCR0]	= 0x0080,
206	[TXALCR0]	= 0x0084,
207	[RXNLCR0]	= 0x0088,
208	[RXALCR0]	= 0x008C,
209};
210
211static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
212	[ECMR]		= 0x0300,
213	[RFLR]		= 0x0308,
214	[ECSR]		= 0x0310,
215	[ECSIPR]	= 0x0318,
216	[PIR]		= 0x0320,
217	[PSR]		= 0x0328,
218	[RDMLR]		= 0x0340,
219	[IPGR]		= 0x0350,
220	[APR]		= 0x0354,
221	[MPR]		= 0x0358,
222	[RFCF]		= 0x0360,
223	[TPAUSER]	= 0x0364,
224	[TPAUSECR]	= 0x0368,
225	[MAHR]		= 0x03c0,
226	[MALR]		= 0x03c8,
227	[TROCR]		= 0x03d0,
228	[CDCR]		= 0x03d4,
229	[LCCR]		= 0x03d8,
230	[CNDCR]		= 0x03dc,
231	[CEFCR]		= 0x03e4,
232	[FRECR]		= 0x03e8,
233	[TSFRCR]	= 0x03ec,
234	[TLFRCR]	= 0x03f0,
235	[RFCR]		= 0x03f4,
236	[MAFCR]		= 0x03f8,
237
238	[EDMR]		= 0x0200,
239	[EDTRR]		= 0x0208,
240	[EDRRR]		= 0x0210,
241	[TDLAR]		= 0x0218,
242	[RDLAR]		= 0x0220,
243	[EESR]		= 0x0228,
244	[EESIPR]	= 0x0230,
245	[TRSCER]	= 0x0238,
246	[RMFCR]		= 0x0240,
247	[TFTR]		= 0x0248,
248	[FDR]		= 0x0250,
249	[RMCR]		= 0x0258,
250	[TFUCR]		= 0x0264,
251	[RFOCR]		= 0x0268,
252	[RMIIMODE]      = 0x026c,
253	[FCFTR]		= 0x0270,
254	[TRIMD]		= 0x027c,
255};
256
257static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
258	[ECMR]		= 0x0100,
259	[RFLR]		= 0x0108,
260	[ECSR]		= 0x0110,
261	[ECSIPR]	= 0x0118,
262	[PIR]		= 0x0120,
263	[PSR]		= 0x0128,
264	[RDMLR]		= 0x0140,
265	[IPGR]		= 0x0150,
266	[APR]		= 0x0154,
267	[MPR]		= 0x0158,
268	[TPAUSER]	= 0x0164,
269	[RFCF]		= 0x0160,
270	[TPAUSECR]	= 0x0168,
271	[BCFRR]		= 0x016c,
272	[MAHR]		= 0x01c0,
273	[MALR]		= 0x01c8,
274	[TROCR]		= 0x01d0,
275	[CDCR]		= 0x01d4,
276	[LCCR]		= 0x01d8,
277	[CNDCR]		= 0x01dc,
278	[CEFCR]		= 0x01e4,
279	[FRECR]		= 0x01e8,
280	[TSFRCR]	= 0x01ec,
281	[TLFRCR]	= 0x01f0,
282	[RFCR]		= 0x01f4,
283	[MAFCR]		= 0x01f8,
284	[RTRATE]	= 0x01fc,
285
286	[EDMR]		= 0x0000,
287	[EDTRR]		= 0x0008,
288	[EDRRR]		= 0x0010,
289	[TDLAR]		= 0x0018,
290	[RDLAR]		= 0x0020,
291	[EESR]		= 0x0028,
292	[EESIPR]	= 0x0030,
293	[TRSCER]	= 0x0038,
294	[RMFCR]		= 0x0040,
295	[TFTR]		= 0x0048,
296	[FDR]		= 0x0050,
297	[RMCR]		= 0x0058,
298	[TFUCR]		= 0x0064,
299	[RFOCR]		= 0x0068,
300	[FCFTR]		= 0x0070,
301	[RPADIR]	= 0x0078,
302	[TRIMD]		= 0x007c,
303	[RBWAR]		= 0x00c8,
304	[RDFAR]		= 0x00cc,
305	[TBRAR]		= 0x00d4,
306	[TDFAR]		= 0x00d8,
307};
308
309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310	[ECMR]		= 0x0160,
311	[ECSR]		= 0x0164,
312	[ECSIPR]	= 0x0168,
313	[PIR]		= 0x016c,
314	[MAHR]		= 0x0170,
315	[MALR]		= 0x0174,
316	[RFLR]		= 0x0178,
317	[PSR]		= 0x017c,
318	[TROCR]		= 0x0180,
319	[CDCR]		= 0x0184,
320	[LCCR]		= 0x0188,
321	[CNDCR]		= 0x018c,
322	[CEFCR]		= 0x0194,
323	[FRECR]		= 0x0198,
324	[TSFRCR]	= 0x019c,
325	[TLFRCR]	= 0x01a0,
326	[RFCR]		= 0x01a4,
327	[MAFCR]		= 0x01a8,
328	[IPGR]		= 0x01b4,
329	[APR]		= 0x01b8,
330	[MPR]		= 0x01bc,
331	[TPAUSER]	= 0x01c4,
332	[BCFR]		= 0x01cc,
333
334	[ARSTR]		= 0x0000,
335	[TSU_CTRST]	= 0x0004,
336	[TSU_FWEN0]	= 0x0010,
337	[TSU_FWEN1]	= 0x0014,
338	[TSU_FCM]	= 0x0018,
339	[TSU_BSYSL0]	= 0x0020,
340	[TSU_BSYSL1]	= 0x0024,
341	[TSU_PRISL0]	= 0x0028,
342	[TSU_PRISL1]	= 0x002c,
343	[TSU_FWSL0]	= 0x0030,
344	[TSU_FWSL1]	= 0x0034,
345	[TSU_FWSLC]	= 0x0038,
346	[TSU_QTAGM0]	= 0x0040,
347	[TSU_QTAGM1]	= 0x0044,
348	[TSU_ADQT0]	= 0x0048,
349	[TSU_ADQT1]	= 0x004c,
350	[TSU_FWSR]	= 0x0050,
351	[TSU_FWINMK]	= 0x0054,
352	[TSU_ADSBSY]	= 0x0060,
353	[TSU_TEN]	= 0x0064,
354	[TSU_POST1]	= 0x0070,
355	[TSU_POST2]	= 0x0074,
356	[TSU_POST3]	= 0x0078,
357	[TSU_POST4]	= 0x007c,
358
359	[TXNLCR0]	= 0x0080,
360	[TXALCR0]	= 0x0084,
361	[RXNLCR0]	= 0x0088,
362	[RXALCR0]	= 0x008c,
363	[FWNLCR0]	= 0x0090,
364	[FWALCR0]	= 0x0094,
365	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
367	[RXNLCR1]	= 0x00a8,
368	[RXALCR1]	= 0x00ac,
369	[FWNLCR1]	= 0x00b0,
370	[FWALCR1]	= 0x00b4,
371
372	[TSU_ADRH0]	= 0x0100,
373	[TSU_ADRL0]	= 0x0104,
374	[TSU_ADRL31]	= 0x01fc,
375};
376
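/* Identify the controller variant by which offset table is in use */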
377static bool sh_eth_is_gether(struct sh_eth_private *mdp)
378{
379	return mdp->reg_offset == sh_eth_offset_gigabit;
380}
381
382static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
383{
384	return mdp->reg_offset == sh_eth_offset_fast_rz;
385}
386
387static void sh_eth_select_mii(struct net_device *ndev)
388{
389	u32 value = 0x0;
390	struct sh_eth_private *mdp = netdev_priv(ndev);
391
392	switch (mdp->phy_interface) {
393	case PHY_INTERFACE_MODE_GMII:
394		value = 0x2;
395		break;
396	case PHY_INTERFACE_MODE_MII:
397		value = 0x1;
398		break;
399	case PHY_INTERFACE_MODE_RMII:
400		value = 0x0;
401		break;
402	default:
		netdev_warn(ndev,
			    "PHY interface mode was not set up; defaulting to MII.\n");
405		value = 0x1;
406		break;
407	}
408
409	sh_eth_write(ndev, value, RMII_MII);
410}
411
412static void sh_eth_set_duplex(struct net_device *ndev)
413{
414	struct sh_eth_private *mdp = netdev_priv(ndev);
415
416	if (mdp->duplex) /* Full */
417		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
418	else		/* Half */
419		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
420}
421
/* There is CPU-dependent code */
423static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
424{
425	struct sh_eth_private *mdp = netdev_priv(ndev);
426
427	switch (mdp->speed) {
428	case 10: /* 10BASE */
429		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
430		break;
	case 100: /* 100BASE */
432		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
433		break;
434	default:
435		break;
436	}
437}
438
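/* Per-SoC capability/quirk descriptors.  Zero-valued fields are later
 * filled in with safe defaults by sh_eth_set_default_cpu_data().
 */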
439/* R8A7778/9 */
440static struct sh_eth_cpu_data r8a777x_data = {
441	.set_duplex	= sh_eth_set_duplex,
442	.set_rate	= sh_eth_set_rate_r8a777x,
443
444	.register_type	= SH_ETH_REG_FAST_RCAR,
445
446	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
447	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
448	.eesipr_value	= 0x01ff009f,
449
450	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
451	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
452			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
453			  EESR_ECI,
454
455	.apr		= 1,
456	.mpr		= 1,
457	.tpauser	= 1,
458	.hw_swap	= 1,
459};
460
461/* R8A7790/1 */
462static struct sh_eth_cpu_data r8a779x_data = {
463	.set_duplex	= sh_eth_set_duplex,
464	.set_rate	= sh_eth_set_rate_r8a777x,
465
466	.register_type	= SH_ETH_REG_FAST_RCAR,
467
468	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
469	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
470	.eesipr_value	= 0x01ff009f,
471
472	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
473	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
474			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
475			  EESR_ECI,
476
477	.apr		= 1,
478	.mpr		= 1,
479	.tpauser	= 1,
480	.hw_swap	= 1,
481	.rmiimode	= 1,
482	.shift_rd0	= 1,
483};
484
485static void sh_eth_set_rate_sh7724(struct net_device *ndev)
486{
487	struct sh_eth_private *mdp = netdev_priv(ndev);
488
489	switch (mdp->speed) {
490	case 10: /* 10BASE */
491		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
492		break;
	case 100: /* 100BASE */
494		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
495		break;
496	default:
497		break;
498	}
499}
500
501/* SH7724 */
502static struct sh_eth_cpu_data sh7724_data = {
503	.set_duplex	= sh_eth_set_duplex,
504	.set_rate	= sh_eth_set_rate_sh7724,
505
506	.register_type	= SH_ETH_REG_FAST_SH4,
507
508	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
509	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
510	.eesipr_value	= 0x01ff009f,
511
512	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
513	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
514			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
515			  EESR_ECI,
516
517	.apr		= 1,
518	.mpr		= 1,
519	.tpauser	= 1,
520	.hw_swap	= 1,
521	.rpadir		= 1,
522	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
523};
524
525static void sh_eth_set_rate_sh7757(struct net_device *ndev)
526{
527	struct sh_eth_private *mdp = netdev_priv(ndev);
528
529	switch (mdp->speed) {
530	case 10: /* 10BASE */
531		sh_eth_write(ndev, 0, RTRATE);
532		break;
	case 100: /* 100BASE */
534		sh_eth_write(ndev, 1, RTRATE);
535		break;
536	default:
537		break;
538	}
539}
540
541/* SH7757 */
542static struct sh_eth_cpu_data sh7757_data = {
543	.set_duplex	= sh_eth_set_duplex,
544	.set_rate	= sh_eth_set_rate_sh7757,
545
546	.register_type	= SH_ETH_REG_FAST_SH4,
547
548	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
549
550	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
551	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
552			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
553			  EESR_ECI,
554
555	.irq_flags	= IRQF_SHARED,
556	.apr		= 1,
557	.mpr		= 1,
558	.tpauser	= 1,
559	.hw_swap	= 1,
560	.no_ade		= 1,
561	.rpadir		= 1,
562	.rpadir_value   = 2 << 16,
563};
564
565#define SH_GIGA_ETH_BASE	0xfee00000UL
566#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
567#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
568static void sh_eth_chip_reset_giga(struct net_device *ndev)
569{
570	int i;
571	unsigned long mahr[2], malr[2];
572
573	/* save MAHR and MALR */
574	for (i = 0; i < 2; i++) {
575		malr[i] = ioread32((void *)GIGA_MALR(i));
576		mahr[i] = ioread32((void *)GIGA_MAHR(i));
577	}
578
579	/* reset device */
580	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
581	mdelay(1);
582
583	/* restore MAHR and MALR */
584	for (i = 0; i < 2; i++) {
585		iowrite32(malr[i], (void *)GIGA_MALR(i));
586		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
587	}
588}
589
590static void sh_eth_set_rate_giga(struct net_device *ndev)
591{
592	struct sh_eth_private *mdp = netdev_priv(ndev);
593
594	switch (mdp->speed) {
595	case 10: /* 10BASE */
596		sh_eth_write(ndev, 0x00000000, GECMR);
597		break;
	case 100: /* 100BASE */
599		sh_eth_write(ndev, 0x00000010, GECMR);
600		break;
601	case 1000: /* 1000BASE */
602		sh_eth_write(ndev, 0x00000020, GECMR);
603		break;
604	default:
605		break;
606	}
607}
608
609/* SH7757(GETHERC) */
610static struct sh_eth_cpu_data sh7757_data_giga = {
611	.chip_reset	= sh_eth_chip_reset_giga,
612	.set_duplex	= sh_eth_set_duplex,
613	.set_rate	= sh_eth_set_rate_giga,
614
615	.register_type	= SH_ETH_REG_GIGABIT,
616
617	.ecsr_value	= ECSR_ICD | ECSR_MPD,
618	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
619	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
620
621	.tx_check	= EESR_TC1 | EESR_FTC,
622	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
623			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
624			  EESR_TDE | EESR_ECI,
625	.fdr_value	= 0x0000072f,
626
627	.irq_flags	= IRQF_SHARED,
628	.apr		= 1,
629	.mpr		= 1,
630	.tpauser	= 1,
631	.bculr		= 1,
632	.hw_swap	= 1,
633	.rpadir		= 1,
634	.rpadir_value   = 2 << 16,
635	.no_trimd	= 1,
636	.no_ade		= 1,
637	.tsu		= 1,
638};
639
640static void sh_eth_chip_reset(struct net_device *ndev)
641{
642	struct sh_eth_private *mdp = netdev_priv(ndev);
643
644	/* reset device */
645	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
646	mdelay(1);
647}
648
649static void sh_eth_set_rate_gether(struct net_device *ndev)
650{
651	struct sh_eth_private *mdp = netdev_priv(ndev);
652
653	switch (mdp->speed) {
654	case 10: /* 10BASE */
655		sh_eth_write(ndev, GECMR_10, GECMR);
656		break;
	case 100: /* 100BASE */
658		sh_eth_write(ndev, GECMR_100, GECMR);
659		break;
660	case 1000: /* 1000BASE */
661		sh_eth_write(ndev, GECMR_1000, GECMR);
662		break;
663	default:
664		break;
665	}
666}
667
668/* SH7734 */
669static struct sh_eth_cpu_data sh7734_data = {
670	.chip_reset	= sh_eth_chip_reset,
671	.set_duplex	= sh_eth_set_duplex,
672	.set_rate	= sh_eth_set_rate_gether,
673
674	.register_type	= SH_ETH_REG_GIGABIT,
675
676	.ecsr_value	= ECSR_ICD | ECSR_MPD,
677	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
678	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
679
680	.tx_check	= EESR_TC1 | EESR_FTC,
681	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
682			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
683			  EESR_TDE | EESR_ECI,
684
685	.apr		= 1,
686	.mpr		= 1,
687	.tpauser	= 1,
688	.bculr		= 1,
689	.hw_swap	= 1,
690	.no_trimd	= 1,
691	.no_ade		= 1,
692	.tsu		= 1,
693	.hw_crc		= 1,
694	.select_mii	= 1,
695};
696
697/* SH7763 */
698static struct sh_eth_cpu_data sh7763_data = {
699	.chip_reset	= sh_eth_chip_reset,
700	.set_duplex	= sh_eth_set_duplex,
701	.set_rate	= sh_eth_set_rate_gether,
702
703	.register_type	= SH_ETH_REG_GIGABIT,
704
705	.ecsr_value	= ECSR_ICD | ECSR_MPD,
706	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
707	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
708
709	.tx_check	= EESR_TC1 | EESR_FTC,
710	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
711			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
712			  EESR_ECI,
713
714	.apr		= 1,
715	.mpr		= 1,
716	.tpauser	= 1,
717	.bculr		= 1,
718	.hw_swap	= 1,
719	.no_trimd	= 1,
720	.no_ade		= 1,
721	.tsu		= 1,
722	.irq_flags	= IRQF_SHARED,
723};
724
725static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
726{
727	struct sh_eth_private *mdp = netdev_priv(ndev);
728
729	/* reset device */
730	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
731	mdelay(1);
732
733	sh_eth_select_mii(ndev);
734}
735
736/* R8A7740 */
737static struct sh_eth_cpu_data r8a7740_data = {
738	.chip_reset	= sh_eth_chip_reset_r8a7740,
739	.set_duplex	= sh_eth_set_duplex,
740	.set_rate	= sh_eth_set_rate_gether,
741
742	.register_type	= SH_ETH_REG_GIGABIT,
743
744	.ecsr_value	= ECSR_ICD | ECSR_MPD,
745	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
746	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
747
748	.tx_check	= EESR_TC1 | EESR_FTC,
749	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
750			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
751			  EESR_TDE | EESR_ECI,
752	.fdr_value	= 0x0000070f,
753
754	.apr		= 1,
755	.mpr		= 1,
756	.tpauser	= 1,
757	.bculr		= 1,
758	.hw_swap	= 1,
759	.rpadir		= 1,
760	.rpadir_value   = 2 << 16,
761	.no_trimd	= 1,
762	.no_ade		= 1,
763	.tsu		= 1,
764	.select_mii	= 1,
765	.shift_rd0	= 1,
766};
767
768/* R7S72100 */
769static struct sh_eth_cpu_data r7s72100_data = {
770	.chip_reset	= sh_eth_chip_reset,
771	.set_duplex	= sh_eth_set_duplex,
772
773	.register_type	= SH_ETH_REG_FAST_RZ,
774
775	.ecsr_value	= ECSR_ICD,
776	.ecsipr_value	= ECSIPR_ICDIP,
777	.eesipr_value	= 0xff7f009f,
778
779	.tx_check	= EESR_TC1 | EESR_FTC,
780	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
781			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
782			  EESR_TDE | EESR_ECI,
783	.fdr_value	= 0x0000070f,
784
785	.no_psr		= 1,
786	.apr		= 1,
787	.mpr		= 1,
788	.tpauser	= 1,
789	.hw_swap	= 1,
790	.rpadir		= 1,
791	.rpadir_value   = 2 << 16,
792	.no_trimd	= 1,
793	.no_ade		= 1,
794	.hw_crc		= 1,
795	.tsu		= 1,
796	.shift_rd0	= 1,
797};
798
799static struct sh_eth_cpu_data sh7619_data = {
800	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
801
802	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
803
804	.apr		= 1,
805	.mpr		= 1,
806	.tpauser	= 1,
807	.hw_swap	= 1,
808};
809
810static struct sh_eth_cpu_data sh771x_data = {
811	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
812
813	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
814	.tsu		= 1,
815};
816
817static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
818{
819	if (!cd->ecsr_value)
820		cd->ecsr_value = DEFAULT_ECSR_INIT;
821
822	if (!cd->ecsipr_value)
823		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
824
825	if (!cd->fcftr_value)
826		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
827				  DEFAULT_FIFO_F_D_RFD;
828
829	if (!cd->fdr_value)
830		cd->fdr_value = DEFAULT_FDR_INIT;
831
832	if (!cd->tx_check)
833		cd->tx_check = DEFAULT_TX_CHECK;
834
835	if (!cd->eesr_err_check)
836		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
837}
838
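/* Poll the EDMR software-reset bits (cleared by hardware once the reset
 * completes), giving up after roughly 100 ms.
 */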
839static int sh_eth_check_reset(struct net_device *ndev)
840{
841	int ret = 0;
842	int cnt = 100;
843
844	while (cnt > 0) {
845		if (!(sh_eth_read(ndev, EDMR) & 0x3))
846			break;
847		mdelay(1);
848		cnt--;
849	}
850	if (cnt <= 0) {
851		netdev_err(ndev, "Device reset failed\n");
852		ret = -ETIMEDOUT;
853	}
854	return ret;
855}
856
857static int sh_eth_reset(struct net_device *ndev)
858{
859	struct sh_eth_private *mdp = netdev_priv(ndev);
860	int ret = 0;
861
862	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
863		sh_eth_write(ndev, EDSR_ENALL, EDSR);
864		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
865			     EDMR);
866
867		ret = sh_eth_check_reset(ndev);
868		if (ret)
869			return ret;
870
871		/* Table Init */
872		sh_eth_write(ndev, 0x0, TDLAR);
873		sh_eth_write(ndev, 0x0, TDFAR);
874		sh_eth_write(ndev, 0x0, TDFXR);
875		sh_eth_write(ndev, 0x0, TDFFR);
876		sh_eth_write(ndev, 0x0, RDLAR);
877		sh_eth_write(ndev, 0x0, RDFAR);
878		sh_eth_write(ndev, 0x0, RDFXR);
879		sh_eth_write(ndev, 0x0, RDFFR);
880
881		/* Reset HW CRC register */
882		if (mdp->cd->hw_crc)
883			sh_eth_write(ndev, 0x0, CSMR);
884
885		/* Select MII mode */
886		if (mdp->cd->select_mii)
887			sh_eth_select_mii(ndev);
888	} else {
889		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
890			     EDMR);
891		mdelay(3);
892		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
893			     EDMR);
894	}
895
896	return ret;
897}
898
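/* Reserve skb headroom so the Rx buffer start meets the DMA engine's
 * alignment requirement: SH4/SH-Mobile parts align to SH4_SKB_RX_ALIGN,
 * while SH2/SH3 parts use a fixed SH2_SH3_SKB_RX_ALIGN reservation.
 */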
899#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
900static void sh_eth_set_receive_align(struct sk_buff *skb)
901{
902	int reserve;
903
904	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
905	if (reserve)
906		skb_reserve(skb, reserve);
907}
908#else
909static void sh_eth_set_receive_align(struct sk_buff *skb)
910{
911	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
912}
913#endif
914
915
916/* CPU <-> EDMAC endian convert */
917static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
918{
919	switch (mdp->edmac_endian) {
920	case EDMAC_LITTLE_ENDIAN:
921		return cpu_to_le32(x);
922	case EDMAC_BIG_ENDIAN:
923		return cpu_to_be32(x);
924	}
925	return x;
926}
927
928static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
929{
930	switch (mdp->edmac_endian) {
931	case EDMAC_LITTLE_ENDIAN:
932		return le32_to_cpu(x);
933	case EDMAC_BIG_ENDIAN:
934		return be32_to_cpu(x);
935	}
936	return x;
937}
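
/* Every descriptor field is written and read through these helpers, e.g.
 *	rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 */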
938
939/* Program the hardware MAC address from dev->dev_addr. */
940static void update_mac_address(struct net_device *ndev)
941{
942	sh_eth_write(ndev,
943		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
944		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
945	sh_eth_write(ndev,
946		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
947}
948
/* Get MAC address from the SuperH MAC address registers
 *
 * SuperH Ethernet controllers have no ROM for the MAC address, so this
 * driver reads back the address programmed by the bootloader (U-Boot or
 * sh-ipl+g).  A MAC address must therefore be set in the bootloader
 * before this device can be used.
 */
956static void read_mac_address(struct net_device *ndev, unsigned char *mac)
957{
958	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
959		memcpy(ndev->dev_addr, mac, ETH_ALEN);
960	} else {
961		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
962		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
963		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
964		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
965		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
966		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
967	}
968}
969
970static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
971{
972	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
973		return EDTRR_TRNS_GETHER;
974	else
975		return EDTRR_TRNS_ETHER;
976}
977
978struct bb_info {
979	void (*set_gate)(void *addr);
980	struct mdiobb_ctrl ctrl;
981	void *addr;
	u32 mmd_msk;	/* MMD */
983	u32 mdo_msk;
984	u32 mdi_msk;
985	u32 mdc_msk;
986};
987
988/* PHY bit set */
989static void bb_set(void *addr, u32 msk)
990{
991	iowrite32(ioread32(addr) | msk, addr);
992}
993
994/* PHY bit clear */
995static void bb_clr(void *addr, u32 msk)
996{
997	iowrite32((ioread32(addr) & ~msk), addr);
998}
999
1000/* PHY bit read */
1001static int bb_read(void *addr, u32 msk)
1002{
1003	return (ioread32(addr) & msk) != 0;
1004}
1005
1006/* Data I/O pin control */
1007static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1008{
1009	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1010
1011	if (bitbang->set_gate)
1012		bitbang->set_gate(bitbang->addr);
1013
1014	if (bit)
1015		bb_set(bitbang->addr, bitbang->mmd_msk);
1016	else
1017		bb_clr(bitbang->addr, bitbang->mmd_msk);
1018}
1019
/* Set bit data */
1021static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1022{
1023	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1024
1025	if (bitbang->set_gate)
1026		bitbang->set_gate(bitbang->addr);
1027
1028	if (bit)
1029		bb_set(bitbang->addr, bitbang->mdo_msk);
1030	else
1031		bb_clr(bitbang->addr, bitbang->mdo_msk);
1032}
1033
/* Get bit data */
1035static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1036{
1037	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1038
1039	if (bitbang->set_gate)
1040		bitbang->set_gate(bitbang->addr);
1041
1042	return bb_read(bitbang->addr, bitbang->mdi_msk);
1043}
1044
1045/* MDC pin control */
1046static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1047{
1048	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1049
1050	if (bitbang->set_gate)
1051		bitbang->set_gate(bitbang->addr);
1052
1053	if (bit)
1054		bb_set(bitbang->addr, bitbang->mdc_msk);
1055	else
1056		bb_clr(bitbang->addr, bitbang->mdc_msk);
1057}
1058
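/* These callbacks plug into the generic mdio-bitbang framework, which
 * synthesizes complete MDIO frames from the bit-level operations above.
 */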
1059/* mdio bus control struct */
1060static struct mdiobb_ops bb_ops = {
1061	.owner = THIS_MODULE,
1062	.set_mdc = sh_mdc_ctrl,
1063	.set_mdio_dir = sh_mmd_ctrl,
1064	.set_mdio_data = sh_set_mdio,
1065	.get_mdio_data = sh_get_mdio,
1066};
1067
1068/* free skb and descriptor buffer */
1069static void sh_eth_ring_free(struct net_device *ndev)
1070{
1071	struct sh_eth_private *mdp = netdev_priv(ndev);
1072	int i;
1073
1074	/* Free Rx skb ringbuffer */
1075	if (mdp->rx_skbuff) {
1076		for (i = 0; i < mdp->num_rx_ring; i++) {
1077			if (mdp->rx_skbuff[i])
1078				dev_kfree_skb(mdp->rx_skbuff[i]);
1079		}
1080	}
1081	kfree(mdp->rx_skbuff);
1082	mdp->rx_skbuff = NULL;
1083
1084	/* Free Tx skb ringbuffer */
1085	if (mdp->tx_skbuff) {
1086		for (i = 0; i < mdp->num_tx_ring; i++) {
1087			if (mdp->tx_skbuff[i])
1088				dev_kfree_skb(mdp->tx_skbuff[i]);
1089		}
1090	}
1091	kfree(mdp->tx_skbuff);
1092	mdp->tx_skbuff = NULL;
1093}
1094
1095/* format skb and descriptor buffer */
1096static void sh_eth_ring_format(struct net_device *ndev)
1097{
1098	struct sh_eth_private *mdp = netdev_priv(ndev);
1099	int i;
1100	struct sk_buff *skb;
1101	struct sh_eth_rxdesc *rxdesc = NULL;
1102	struct sh_eth_txdesc *txdesc = NULL;
1103	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1104	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1105
1106	mdp->cur_rx = 0;
1107	mdp->cur_tx = 0;
1108	mdp->dirty_rx = 0;
1109	mdp->dirty_tx = 0;
1110
1111	memset(mdp->rx_ring, 0, rx_ringsize);
1112
1113	/* build Rx ring buffer */
1114	for (i = 0; i < mdp->num_rx_ring; i++) {
1115		/* skb */
1116		mdp->rx_skbuff[i] = NULL;
1117		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1118		mdp->rx_skbuff[i] = skb;
1119		if (skb == NULL)
1120			break;
1121		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1122			       DMA_FROM_DEVICE);
1123		sh_eth_set_receive_align(skb);
1124
1125		/* RX descriptor */
1126		rxdesc = &mdp->rx_ring[i];
1127		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1128		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1129
		/* The buffer size is rounded up to a 16-byte boundary. */
1131		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1132		/* Rx descriptor address set */
1133		if (i == 0) {
1134			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1135			if (sh_eth_is_gether(mdp) ||
1136			    sh_eth_is_rz_fast_ether(mdp))
1137				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1138		}
1139	}
1140
1141	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1142
1143	/* Mark the last entry as wrapping the ring. */
1144	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
1145
1146	memset(mdp->tx_ring, 0, tx_ringsize);
1147
1148	/* build Tx ring buffer */
1149	for (i = 0; i < mdp->num_tx_ring; i++) {
1150		mdp->tx_skbuff[i] = NULL;
1151		txdesc = &mdp->tx_ring[i];
1152		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1153		txdesc->buffer_length = 0;
1154		if (i == 0) {
1155			/* Tx descriptor address set */
1156			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1157			if (sh_eth_is_gether(mdp) ||
1158			    sh_eth_is_rz_fast_ether(mdp))
1159				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1160		}
1161	}
1162
1163	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1164}
1165
1166/* Get skb and descriptor buffer */
1167static int sh_eth_ring_init(struct net_device *ndev)
1168{
1169	struct sh_eth_private *mdp = netdev_priv(ndev);
1170	int rx_ringsize, tx_ringsize, ret = 0;
1171
1172	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1173	 * card needs room to do 8 byte alignment, +2 so we can reserve
1174	 * the first 2 bytes, and +16 gets room for the status word from the
1175	 * card.
1176	 */
1177	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1178			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1179	if (mdp->cd->rpadir)
1180		mdp->rx_buf_sz += NET_IP_ALIGN;
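
	/* Worked example: with an MTU of 2000 the formula above gives
	 * ((2000 + 26 + 7) & ~7) + 2 + 16 = 2032 + 18 = 2050 bytes.
	 */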
1181
1182	/* Allocate RX and TX skb rings */
1183	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
1184				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
1185	if (!mdp->rx_skbuff) {
1186		ret = -ENOMEM;
1187		return ret;
1188	}
1189
1190	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
1191				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
1192	if (!mdp->tx_skbuff) {
1193		ret = -ENOMEM;
1194		goto skb_ring_free;
1195	}
1196
1197	/* Allocate all Rx descriptors. */
1198	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1199	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1200					  GFP_KERNEL);
1201	if (!mdp->rx_ring) {
1202		ret = -ENOMEM;
1203		goto desc_ring_free;
1204	}
1205
1206	mdp->dirty_rx = 0;
1207
1208	/* Allocate all Tx descriptors. */
1209	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1210	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1211					  GFP_KERNEL);
1212	if (!mdp->tx_ring) {
1213		ret = -ENOMEM;
1214		goto desc_ring_free;
1215	}
1216	return ret;
1217
1218desc_ring_free:
1219	/* free DMA buffer */
1220	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1221
1222skb_ring_free:
1223	/* Free Rx and Tx skb ring buffer */
1224	sh_eth_ring_free(ndev);
1225	mdp->tx_ring = NULL;
1226	mdp->rx_ring = NULL;
1227
1228	return ret;
1229}
1230
1231static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
1232{
1233	int ringsize;
1234
1235	if (mdp->rx_ring) {
1236		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1237		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1238				  mdp->rx_desc_dma);
1239		mdp->rx_ring = NULL;
1240	}
1241
1242	if (mdp->tx_ring) {
1243		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1244		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1245				  mdp->tx_desc_dma);
1246		mdp->tx_ring = NULL;
1247	}
1248}
1249
1250static int sh_eth_dev_init(struct net_device *ndev, bool start)
1251{
1252	int ret = 0;
1253	struct sh_eth_private *mdp = netdev_priv(ndev);
1254	u32 val;
1255
1256	/* Soft Reset */
1257	ret = sh_eth_reset(ndev);
1258	if (ret)
1259		return ret;
1260
1261	if (mdp->cd->rmiimode)
1262		sh_eth_write(ndev, 0x1, RMIIMODE);
1263
1264	/* Descriptor format */
1265	sh_eth_ring_format(ndev);
1266	if (mdp->cd->rpadir)
1267		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1268
1269	/* all sh_eth int mask */
1270	sh_eth_write(ndev, 0, EESIPR);
1271
1272#if defined(__LITTLE_ENDIAN)
1273	if (mdp->cd->hw_swap)
1274		sh_eth_write(ndev, EDMR_EL, EDMR);
1275	else
1276#endif
1277		sh_eth_write(ndev, 0, EDMR);
1278
1279	/* FIFO size set */
1280	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1281	sh_eth_write(ndev, 0, TFTR);
1282
	/* Frame recv control (enable multiple packets per Rx IRQ) */
1284	sh_eth_write(ndev, RMCR_RNC, RMCR);
1285
1286	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1287
1288	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
1290
1291	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1292
1293	if (!mdp->cd->no_trimd)
1294		sh_eth_write(ndev, 0, TRIMD);
1295
1296	/* Recv frame limit set register */
1297	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1298		     RFLR);
1299
1300	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1301	if (start)
1302		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1303
1304	/* PAUSE Prohibition */
1305	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1306		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1307
1308	sh_eth_write(ndev, val, ECMR);
1309
1310	if (mdp->cd->set_rate)
1311		mdp->cd->set_rate(ndev);
1312
1313	/* E-MAC Status Register clear */
1314	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1315
1316	/* E-MAC Interrupt Enable register */
1317	if (start)
1318		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1319
1320	/* Set MAC address */
1321	update_mac_address(ndev);
1322
1323	/* mask reset */
1324	if (mdp->cd->apr)
1325		sh_eth_write(ndev, APR_AP, APR);
1326	if (mdp->cd->mpr)
1327		sh_eth_write(ndev, MPR_MP, MPR);
1328	if (mdp->cd->tpauser)
1329		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1330
1331	if (start) {
1332		/* Setting the Rx mode will start the Rx process. */
1333		sh_eth_write(ndev, EDRRR_R, EDRRR);
1334
1335		netif_start_queue(ndev);
1336	}
1337
1338	return ret;
1339}
1340
1341/* free Tx skb function */
1342static int sh_eth_txfree(struct net_device *ndev)
1343{
1344	struct sh_eth_private *mdp = netdev_priv(ndev);
1345	struct sh_eth_txdesc *txdesc;
1346	int free_num = 0;
1347	int entry = 0;
1348
1349	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1350		entry = mdp->dirty_tx % mdp->num_tx_ring;
1351		txdesc = &mdp->tx_ring[entry];
1352		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1353			break;
1354		/* Free the original skb. */
1355		if (mdp->tx_skbuff[entry]) {
1356			dma_unmap_single(&ndev->dev, txdesc->addr,
1357					 txdesc->buffer_length, DMA_TO_DEVICE);
1358			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1359			mdp->tx_skbuff[entry] = NULL;
1360			free_num++;
1361		}
1362		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1363		if (entry >= mdp->num_tx_ring - 1)
1364			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1365
1366		ndev->stats.tx_packets++;
1367		ndev->stats.tx_bytes += txdesc->buffer_length;
1368	}
1369	return free_num;
1370}
1371
1372/* Packet receive function */
1373static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1374{
1375	struct sh_eth_private *mdp = netdev_priv(ndev);
1376	struct sh_eth_rxdesc *rxdesc;
1377
1378	int entry = mdp->cur_rx % mdp->num_rx_ring;
1379	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1380	struct sk_buff *skb;
1381	int exceeded = 0;
1382	u16 pkt_len = 0;
1383	u32 desc_status;
1384
1385	rxdesc = &mdp->rx_ring[entry];
1386	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1387		desc_status = edmac_to_cpu(mdp, rxdesc->status);
1388		pkt_len = rxdesc->frame_length;
1389
1390		if (--boguscnt < 0)
1391			break;
1392
1393		if (*quota <= 0) {
1394			exceeded = 1;
1395			break;
1396		}
1397		(*quota)--;
1398
1399		if (!(desc_status & RDFEND))
1400			ndev->stats.rx_length_errors++;
1401
		/* On almost all GETHER/ETHER controllers, the Receive Frame
		 * State (RFS) bits in receive descriptor 0 occupy bits 9..0.
		 * On the R8A7740, R8A779x, and R7S72100, however, the RFS
		 * bits occupy bits 25..16, so the driver must shift them
		 * right by 16.
		 */
1408		if (mdp->cd->shift_rd0)
1409			desc_status >>= 16;
1410
1411		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1412				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1413			ndev->stats.rx_errors++;
1414			if (desc_status & RD_RFS1)
1415				ndev->stats.rx_crc_errors++;
1416			if (desc_status & RD_RFS2)
1417				ndev->stats.rx_frame_errors++;
1418			if (desc_status & RD_RFS3)
1419				ndev->stats.rx_length_errors++;
1420			if (desc_status & RD_RFS4)
1421				ndev->stats.rx_length_errors++;
1422			if (desc_status & RD_RFS6)
1423				ndev->stats.rx_missed_errors++;
1424			if (desc_status & RD_RFS10)
1425				ndev->stats.rx_over_errors++;
1426		} else {
1427			if (!mdp->cd->hw_swap)
1428				sh_eth_soft_swap(
1429					phys_to_virt(ALIGN(rxdesc->addr, 4)),
1430					pkt_len + 2);
1431			skb = mdp->rx_skbuff[entry];
1432			mdp->rx_skbuff[entry] = NULL;
1433			if (mdp->cd->rpadir)
1434				skb_reserve(skb, NET_IP_ALIGN);
1435			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1436						mdp->rx_buf_sz,
1437						DMA_FROM_DEVICE);
1438			skb_put(skb, pkt_len);
1439			skb->protocol = eth_type_trans(skb, ndev);
1440			netif_receive_skb(skb);
1441			ndev->stats.rx_packets++;
1442			ndev->stats.rx_bytes += pkt_len;
1443		}
1444		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1445		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1446		rxdesc = &mdp->rx_ring[entry];
1447	}
1448
1449	/* Refill the Rx ring buffers. */
1450	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1451		entry = mdp->dirty_rx % mdp->num_rx_ring;
1452		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
1454		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1455
1456		if (mdp->rx_skbuff[entry] == NULL) {
1457			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1458			mdp->rx_skbuff[entry] = skb;
1459			if (skb == NULL)
1460				break;	/* Better luck next round. */
1461			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1462				       DMA_FROM_DEVICE);
1463			sh_eth_set_receive_align(skb);
1464
1465			skb_checksum_none_assert(skb);
1466			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1467		}
1468		if (entry >= mdp->num_rx_ring - 1)
1469			rxdesc->status |=
1470				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1471		else
1472			rxdesc->status |=
1473				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1474	}
1475
1476	/* Restart Rx engine if stopped. */
1477	/* If we don't need to check status, don't. -KDU */
1478	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1479		/* fix the values for the next receiving if RDE is set */
1480		if (intr_status & EESR_RDE) {
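			/* Each descriptor is 16 bytes, so (RDFAR - RDLAR) >> 4
			 * gives the index of the descriptor the hardware
			 * would fetch next.
			 */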
1481			u32 count = (sh_eth_read(ndev, RDFAR) -
1482				     sh_eth_read(ndev, RDLAR)) >> 4;
1483
1484			mdp->cur_rx = count;
1485			mdp->dirty_rx = count;
1486		}
1487		sh_eth_write(ndev, EDRRR_R, EDRRR);
1488	}
1489
1490	return exceeded;
1491}
1492
1493static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1494{
1495	/* disable tx and rx */
1496	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1497		~(ECMR_RE | ECMR_TE), ECMR);
1498}
1499
1500static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1501{
1502	/* enable tx and rx */
1503	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1504		(ECMR_RE | ECMR_TE), ECMR);
1505}
1506
1507/* error control function */
1508static void sh_eth_error(struct net_device *ndev, int intr_status)
1509{
1510	struct sh_eth_private *mdp = netdev_priv(ndev);
1511	u32 felic_stat;
1512	u32 link_stat;
1513	u32 mask;
1514
1515	if (intr_status & EESR_ECI) {
1516		felic_stat = sh_eth_read(ndev, ECSR);
1517		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1518		if (felic_stat & ECSR_ICD)
1519			ndev->stats.tx_carrier_errors++;
1520		if (felic_stat & ECSR_LCHNG) {
1521			/* Link Changed */
1522			if (mdp->cd->no_psr || mdp->no_ether_link) {
1523				goto ignore_link;
1524			} else {
1525				link_stat = (sh_eth_read(ndev, PSR));
1526				if (mdp->ether_link_active_low)
1527					link_stat = ~link_stat;
1528			}
1529			if (!(link_stat & PHY_ST_LINK)) {
1530				sh_eth_rcv_snd_disable(ndev);
1531			} else {
1532				/* Link Up */
1533				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1534						   ~DMAC_M_ECI, EESIPR);
1535				/* clear int */
1536				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1537					     ECSR);
1538				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1539						   DMAC_M_ECI, EESIPR);
1540				/* enable tx and rx */
1541				sh_eth_rcv_snd_enable(ndev);
1542			}
1543		}
1544	}
1545
1546ignore_link:
1547	if (intr_status & EESR_TWB) {
1548		/* Unused write back interrupt */
1549		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
1550			ndev->stats.tx_aborted_errors++;
1551			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1552		}
1553	}
1554
1555	if (intr_status & EESR_RABT) {
1556		/* Receive Abort int */
1557		if (intr_status & EESR_RFRMER) {
1558			/* Receive Frame Overflow int */
1559			ndev->stats.rx_frame_errors++;
1560			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1561		}
1562	}
1563
1564	if (intr_status & EESR_TDE) {
1565		/* Transmit Descriptor Empty int */
1566		ndev->stats.tx_fifo_errors++;
1567		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1568	}
1569
1570	if (intr_status & EESR_TFE) {
1571		/* FIFO under flow */
1572		ndev->stats.tx_fifo_errors++;
1573		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1574	}
1575
1576	if (intr_status & EESR_RDE) {
1577		/* Receive Descriptor Empty int */
1578		ndev->stats.rx_over_errors++;
1579		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1580	}
1581
1582	if (intr_status & EESR_RFE) {
1583		/* Receive FIFO Overflow int */
1584		ndev->stats.rx_fifo_errors++;
1585		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1586	}
1587
1588	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1589		/* Address Error */
1590		ndev->stats.tx_fifo_errors++;
1591		netif_err(mdp, tx_err, ndev, "Address Error\n");
1592	}
1593
1594	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1595	if (mdp->cd->no_ade)
1596		mask &= ~EESR_ADE;
1597	if (intr_status & mask) {
1598		/* Tx error */
1599		u32 edtrr = sh_eth_read(ndev, EDTRR);
1600
1601		/* dmesg */
1602		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1603			   intr_status, mdp->cur_tx, mdp->dirty_tx,
1604			   (u32)ndev->state, edtrr);
1605		/* dirty buffer free */
1606		sh_eth_txfree(ndev);
1607
1608		/* SH7712 BUG */
1609		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1610			/* tx dma start */
1611			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1612		}
1613		/* wakeup */
1614		netif_wake_queue(ndev);
1615	}
1616}
1617
1618static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1619{
1620	struct net_device *ndev = netdev;
1621	struct sh_eth_private *mdp = netdev_priv(ndev);
1622	struct sh_eth_cpu_data *cd = mdp->cd;
1623	irqreturn_t ret = IRQ_NONE;
1624	unsigned long intr_status, intr_enable;
1625
1626	spin_lock(&mdp->lock);
1627
1628	/* Get interrupt status */
1629	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
	 * always enabled since it is the one that comes through regardless of
	 * the mask, and we need to fully handle it in sh_eth_error() in order
	 * to quench it, as it doesn't get cleared by just writing 1 to the
	 * ECI bit...
	 */
1635	intr_enable = sh_eth_read(ndev, EESIPR);
1636	intr_status &= intr_enable | DMAC_M_ECI;
1637	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1638		ret = IRQ_HANDLED;
1639	else
1640		goto other_irq;
1641
1642	if (intr_status & EESR_RX_CHECK) {
1643		if (napi_schedule_prep(&mdp->napi)) {
1644			/* Mask Rx interrupts */
1645			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1646				     EESIPR);
1647			__napi_schedule(&mdp->napi);
1648		} else {
1649			netdev_warn(ndev,
1650				    "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1651				    intr_status, intr_enable);
1652		}
1653	}
1654
1655	/* Tx Check */
1656	if (intr_status & cd->tx_check) {
1657		/* Clear Tx interrupts */
1658		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1659
1660		sh_eth_txfree(ndev);
1661		netif_wake_queue(ndev);
1662	}
1663
1664	if (intr_status & cd->eesr_err_check) {
1665		/* Clear error interrupts */
1666		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1667
1668		sh_eth_error(ndev, intr_status);
1669	}
1670
1671other_irq:
1672	spin_unlock(&mdp->lock);
1673
1674	return ret;
1675}
1676
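/* NAPI poll handler: drain Rx work within the given budget, then re-enable
 * Rx interrupts once the ring is quiet.
 */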
1677static int sh_eth_poll(struct napi_struct *napi, int budget)
1678{
1679	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1680						  napi);
1681	struct net_device *ndev = napi->dev;
1682	int quota = budget;
1683	unsigned long intr_status;
1684
1685	for (;;) {
1686		intr_status = sh_eth_read(ndev, EESR);
1687		if (!(intr_status & EESR_RX_CHECK))
1688			break;
1689		/* Clear Rx interrupts */
1690		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1691
1692		if (sh_eth_rx(ndev, intr_status, &quota))
1693			goto out;
1694	}
1695
1696	napi_complete(napi);
1697
1698	/* Reenable Rx interrupts */
1699	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1700out:
1701	return budget - quota;
1702}
1703
1704/* PHY state control function */
1705static void sh_eth_adjust_link(struct net_device *ndev)
1706{
1707	struct sh_eth_private *mdp = netdev_priv(ndev);
1708	struct phy_device *phydev = mdp->phydev;
1709	int new_state = 0;
1710
1711	if (phydev->link) {
1712		if (phydev->duplex != mdp->duplex) {
1713			new_state = 1;
1714			mdp->duplex = phydev->duplex;
1715			if (mdp->cd->set_duplex)
1716				mdp->cd->set_duplex(ndev);
1717		}
1718
1719		if (phydev->speed != mdp->speed) {
1720			new_state = 1;
1721			mdp->speed = phydev->speed;
1722			if (mdp->cd->set_rate)
1723				mdp->cd->set_rate(ndev);
1724		}
1725		if (!mdp->link) {
1726			sh_eth_write(ndev,
1727				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1728				     ECMR);
1729			new_state = 1;
1730			mdp->link = phydev->link;
1731			if (mdp->cd->no_psr || mdp->no_ether_link)
1732				sh_eth_rcv_snd_enable(ndev);
1733		}
1734	} else if (mdp->link) {
1735		new_state = 1;
1736		mdp->link = 0;
1737		mdp->speed = 0;
1738		mdp->duplex = -1;
1739		if (mdp->cd->no_psr || mdp->no_ether_link)
1740			sh_eth_rcv_snd_disable(ndev);
1741	}
1742
1743	if (new_state && netif_msg_link(mdp))
1744		phy_print_status(phydev);
1745}
1746
1747/* PHY init function */
1748static int sh_eth_phy_init(struct net_device *ndev)
1749{
1750	struct device_node *np = ndev->dev.parent->of_node;
1751	struct sh_eth_private *mdp = netdev_priv(ndev);
1752	struct phy_device *phydev = NULL;
1753
1754	mdp->link = 0;
1755	mdp->speed = 0;
1756	mdp->duplex = -1;
1757
1758	/* Try connect to PHY */
1759	if (np) {
1760		struct device_node *pn;
1761
1762		pn = of_parse_phandle(np, "phy-handle", 0);
1763		phydev = of_phy_connect(ndev, pn,
1764					sh_eth_adjust_link, 0,
1765					mdp->phy_interface);
1766
1767		if (!phydev)
1768			phydev = ERR_PTR(-ENOENT);
1769	} else {
1770		char phy_id[MII_BUS_ID_SIZE + 3];
1771
1772		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1773			 mdp->mii_bus->id, mdp->phy_id);
1774
1775		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1776				     mdp->phy_interface);
1777	}
1778
1779	if (IS_ERR(phydev)) {
1780		netdev_err(ndev, "failed to connect PHY\n");
1781		return PTR_ERR(phydev);
1782	}
1783
1784	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1785		    phydev->addr, phydev->irq, phydev->drv->name);
1786
1787	mdp->phydev = phydev;
1788
1789	return 0;
1790}
1791
1792/* PHY control start function */
1793static int sh_eth_phy_start(struct net_device *ndev)
1794{
1795	struct sh_eth_private *mdp = netdev_priv(ndev);
1796	int ret;
1797
1798	ret = sh_eth_phy_init(ndev);
1799	if (ret)
1800		return ret;
1801
1802	phy_start(mdp->phydev);
1803
1804	return 0;
1805}
1806
1807static int sh_eth_get_settings(struct net_device *ndev,
1808			       struct ethtool_cmd *ecmd)
1809{
1810	struct sh_eth_private *mdp = netdev_priv(ndev);
1811	unsigned long flags;
1812	int ret;
1813
1814	spin_lock_irqsave(&mdp->lock, flags);
1815	ret = phy_ethtool_gset(mdp->phydev, ecmd);
1816	spin_unlock_irqrestore(&mdp->lock, flags);
1817
1818	return ret;
1819}
1820
1821static int sh_eth_set_settings(struct net_device *ndev,
1822			       struct ethtool_cmd *ecmd)
1823{
1824	struct sh_eth_private *mdp = netdev_priv(ndev);
1825	unsigned long flags;
1826	int ret;
1827
1828	spin_lock_irqsave(&mdp->lock, flags);
1829
1830	/* disable tx and rx */
1831	sh_eth_rcv_snd_disable(ndev);
1832
1833	ret = phy_ethtool_sset(mdp->phydev, ecmd);
1834	if (ret)
1835		goto error_exit;
1836
1837	if (ecmd->duplex == DUPLEX_FULL)
1838		mdp->duplex = 1;
1839	else
1840		mdp->duplex = 0;
1841
1842	if (mdp->cd->set_duplex)
1843		mdp->cd->set_duplex(ndev);
1844
1845error_exit:
1846	mdelay(1);
1847
1848	/* enable tx and rx */
1849	sh_eth_rcv_snd_enable(ndev);
1850
1851	spin_unlock_irqrestore(&mdp->lock, flags);
1852
1853	return ret;
1854}
1855
1856static int sh_eth_nway_reset(struct net_device *ndev)
1857{
1858	struct sh_eth_private *mdp = netdev_priv(ndev);
1859	unsigned long flags;
1860	int ret;
1861
1862	spin_lock_irqsave(&mdp->lock, flags);
1863	ret = phy_start_aneg(mdp->phydev);
1864	spin_unlock_irqrestore(&mdp->lock, flags);
1865
1866	return ret;
1867}
1868
1869static u32 sh_eth_get_msglevel(struct net_device *ndev)
1870{
1871	struct sh_eth_private *mdp = netdev_priv(ndev);
1872	return mdp->msg_enable;
1873}
1874
1875static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1876{
1877	struct sh_eth_private *mdp = netdev_priv(ndev);
1878	mdp->msg_enable = value;
1879}
1880
1881static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1882	"rx_current", "tx_current",
1883	"rx_dirty", "tx_dirty",
1884};
1885#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1886
1887static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1888{
1889	switch (sset) {
1890	case ETH_SS_STATS:
1891		return SH_ETH_STATS_LEN;
1892	default:
1893		return -EOPNOTSUPP;
1894	}
1895}
1896
1897static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1898				     struct ethtool_stats *stats, u64 *data)
1899{
1900	struct sh_eth_private *mdp = netdev_priv(ndev);
1901	int i = 0;
1902
1903	/* device-specific stats */
1904	data[i++] = mdp->cur_rx;
1905	data[i++] = mdp->cur_tx;
1906	data[i++] = mdp->dirty_rx;
1907	data[i++] = mdp->dirty_tx;
1908}
1909
1910static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1911{
1912	switch (stringset) {
1913	case ETH_SS_STATS:
1914		memcpy(data, *sh_eth_gstrings_stats,
1915		       sizeof(sh_eth_gstrings_stats));
1916		break;
1917	}
1918}
1919
1920static void sh_eth_get_ringparam(struct net_device *ndev,
1921				 struct ethtool_ringparam *ring)
1922{
1923	struct sh_eth_private *mdp = netdev_priv(ndev);
1924
1925	ring->rx_max_pending = RX_RING_MAX;
1926	ring->tx_max_pending = TX_RING_MAX;
1927	ring->rx_pending = mdp->num_rx_ring;
1928	ring->tx_pending = mdp->num_tx_ring;
1929}
1930
1931static int sh_eth_set_ringparam(struct net_device *ndev,
1932				struct ethtool_ringparam *ring)
1933{
1934	struct sh_eth_private *mdp = netdev_priv(ndev);
1935	int ret;
1936
1937	if (ring->tx_pending > TX_RING_MAX ||
1938	    ring->rx_pending > RX_RING_MAX ||
1939	    ring->tx_pending < TX_RING_MIN ||
1940	    ring->rx_pending < RX_RING_MIN)
1941		return -EINVAL;
1942	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1943		return -EINVAL;
1944
1945	if (netif_running(ndev)) {
1946		netif_tx_disable(ndev);
1947		/* Disable interrupts by clearing the interrupt mask. */
1948		sh_eth_write(ndev, 0x0000, EESIPR);
1949		/* Stop the chip's Tx and Rx processes. */
1950		sh_eth_write(ndev, 0, EDTRR);
1951		sh_eth_write(ndev, 0, EDRRR);
1952		synchronize_irq(ndev->irq);
1953	}
1954
1955	/* Free all the skbuffs in the Rx queue. */
1956	sh_eth_ring_free(ndev);
1957	/* Free DMA buffer */
1958	sh_eth_free_dma_buffer(mdp);
1959
1960	/* Set new parameters */
1961	mdp->num_rx_ring = ring->rx_pending;
1962	mdp->num_tx_ring = ring->tx_pending;
1963
1964	ret = sh_eth_ring_init(ndev);
1965	if (ret < 0) {
1966		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
1967		return ret;
1968	}
1969	ret = sh_eth_dev_init(ndev, false);
1970	if (ret < 0) {
1971		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
1972		return ret;
1973	}
1974
1975	if (netif_running(ndev)) {
1976		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1977		/* Setting the Rx mode will start the Rx process. */
1978		sh_eth_write(ndev, EDRRR_R, EDRRR);
1979		netif_wake_queue(ndev);
1980	}
1981
1982	return 0;
1983}
1984
1985static const struct ethtool_ops sh_eth_ethtool_ops = {
1986	.get_settings	= sh_eth_get_settings,
1987	.set_settings	= sh_eth_set_settings,
1988	.nway_reset	= sh_eth_nway_reset,
1989	.get_msglevel	= sh_eth_get_msglevel,
1990	.set_msglevel	= sh_eth_set_msglevel,
1991	.get_link	= ethtool_op_get_link,
1992	.get_strings	= sh_eth_get_strings,
1993	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
1994	.get_sset_count     = sh_eth_get_sset_count,
1995	.get_ringparam	= sh_eth_get_ringparam,
1996	.set_ringparam	= sh_eth_set_ringparam,
1997};
1998
1999/* network device open function */
2000static int sh_eth_open(struct net_device *ndev)
2001{
2002	int ret = 0;
2003	struct sh_eth_private *mdp = netdev_priv(ndev);
2004
2005	pm_runtime_get_sync(&mdp->pdev->dev);
2006
2007	napi_enable(&mdp->napi);
2008
2009	ret = request_irq(ndev->irq, sh_eth_interrupt,
2010			  mdp->cd->irq_flags, ndev->name, ndev);
2011	if (ret) {
2012		netdev_err(ndev, "Can not assign IRQ number\n");
2013		goto out_napi_off;
2014	}
2015
2016	/* Descriptor set */
2017	ret = sh_eth_ring_init(ndev);
2018	if (ret)
2019		goto out_free_irq;
2020
2021	/* device init */
2022	ret = sh_eth_dev_init(ndev, true);
2023	if (ret)
2024		goto out_free_irq;
2025
	/* PHY control start */
2027	ret = sh_eth_phy_start(ndev);
2028	if (ret)
2029		goto out_free_irq;
2030
2031	return ret;
2032
2033out_free_irq:
2034	free_irq(ndev->irq, ndev);
2035out_napi_off:
2036	napi_disable(&mdp->napi);
2037	pm_runtime_put_sync(&mdp->pdev->dev);
2038	return ret;
2039}
2040
2041/* Timeout function */
2042static void sh_eth_tx_timeout(struct net_device *ndev)
2043{
2044	struct sh_eth_private *mdp = netdev_priv(ndev);
2045	struct sh_eth_rxdesc *rxdesc;
2046	int i;
2047
2048	netif_stop_queue(ndev);
2049
2050	netif_err(mdp, timer, ndev,
2051		  "transmit timed out, status %8.8x, resetting...\n",
2052		  (int)sh_eth_read(ndev, EESR));
2053
2054	/* tx_errors count up */
2055	ndev->stats.tx_errors++;
2056
2057	/* Free all the skbuffs in the Rx queue. */
2058	for (i = 0; i < mdp->num_rx_ring; i++) {
2059		rxdesc = &mdp->rx_ring[i];
2060		rxdesc->status = 0;
2061		rxdesc->addr = 0xBADF00D0;
2062		if (mdp->rx_skbuff[i])
2063			dev_kfree_skb(mdp->rx_skbuff[i]);
2064		mdp->rx_skbuff[i] = NULL;
2065	}
2066	for (i = 0; i < mdp->num_tx_ring; i++) {
2067		if (mdp->tx_skbuff[i])
2068			dev_kfree_skb(mdp->tx_skbuff[i]);
2069		mdp->tx_skbuff[i] = NULL;
2070	}
2071
2072	/* device init */
2073	sh_eth_dev_init(ndev, true);
2074}
2075
2076/* Packet transmit function */
2077static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2078{
2079	struct sh_eth_private *mdp = netdev_priv(ndev);
2080	struct sh_eth_txdesc *txdesc;
2081	u32 entry;
2082	unsigned long flags;
2083
2084	spin_lock_irqsave(&mdp->lock, flags);
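	/* Fewer than four free descriptors left: try to reclaim completed
	 * ones, and stop the queue if none can be freed.
	 */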
2085	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2086		if (!sh_eth_txfree(ndev)) {
2087			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2088			netif_stop_queue(ndev);
2089			spin_unlock_irqrestore(&mdp->lock, flags);
2090			return NETDEV_TX_BUSY;
2091		}
2092	}
2093	spin_unlock_irqrestore(&mdp->lock, flags);
2094
2095	entry = mdp->cur_tx % mdp->num_tx_ring;
2096	mdp->tx_skbuff[entry] = skb;
2097	txdesc = &mdp->tx_ring[entry];
2098	/* soft swap; swap the CPU buffer directly, since txdesc->addr has
2099	 * not been written yet at this point */
2100	if (!mdp->cd->hw_swap)
2101		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2102	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2103				      DMA_TO_DEVICE);
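	/* dma_map_single() can fail, and letting a bogus DMA address reach
	 * the descriptor would be fatal; a minimal sketch of the usual check
	 * (not present in this revision):
	 */
	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
		mdp->tx_skbuff[entry] = NULL;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}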
2104	if (skb->len < ETH_ZLEN)
2105		txdesc->buffer_length = ETH_ZLEN;
2106	else
2107		txdesc->buffer_length = skb->len;
2108
2109	if (entry >= mdp->num_tx_ring - 1)
2110		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2111	else
2112		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2113
2114	mdp->cur_tx++;
2115
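	/* Kick the TX DMA engine if it has stopped: the controller clears
	 * the transfer request bit in EDTRR when it hits an inactive
	 * descriptor, and writing the bit back restarts the fetch.
	 */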
2116	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2117		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2118
2119	return NETDEV_TX_OK;
2120}
2121
2122/* device close function */
2123static int sh_eth_close(struct net_device *ndev)
2124{
2125	struct sh_eth_private *mdp = netdev_priv(ndev);
2126
2127	netif_stop_queue(ndev);
2128
2129	/* Disable interrupts by clearing the interrupt mask. */
2130	sh_eth_write(ndev, 0x0000, EESIPR);
2131
2132	/* Stop the chip's Tx and Rx processes. */
2133	sh_eth_write(ndev, 0, EDTRR);
2134	sh_eth_write(ndev, 0, EDRRR);
2135
2136	/* PHY Disconnect */
2137	if (mdp->phydev) {
2138		phy_stop(mdp->phydev);
2139		phy_disconnect(mdp->phydev);
2140	}
2141
2142	free_irq(ndev->irq, ndev);
2143
2144	napi_disable(&mdp->napi);
2145
2146	/* Free all the skbuffs in the Rx queue. */
2147	sh_eth_ring_free(ndev);
2148
2149	/* free DMA buffer */
2150	sh_eth_free_dma_buffer(mdp);
2151
2152	pm_runtime_put_sync(&mdp->pdev->dev);
2153
2154	return 0;
2155}
2156
2157static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2158{
2159	struct sh_eth_private *mdp = netdev_priv(ndev);
2160
2161	if (sh_eth_is_rz_fast_ether(mdp))
2162		return &ndev->stats;
2163
2164	pm_runtime_get_sync(&mdp->pdev->dev);
2165
2166	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2167	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
2168	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2169	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
2170	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2171	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
2172	if (sh_eth_is_gether(mdp)) {
2173		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2174		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
2175		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2176		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
2177	} else {
2178		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2179		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
2180	}
2181	pm_runtime_put_sync(&mdp->pdev->dev);
2182
2183	return &ndev->stats;
2184}
2185
2186/* ioctl to device function */
2187static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2188{
2189	struct sh_eth_private *mdp = netdev_priv(ndev);
2190	struct phy_device *phydev = mdp->phydev;
2191
2192	if (!netif_running(ndev))
2193		return -EINVAL;
2194
2195	if (!phydev)
2196		return -ENODEV;
2197
2198	return phy_mii_ioctl(phydev, rq, cmd);
2199}
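
/* The ioctl above simply forwards to phylib: phy_mii_ioctl() services the
 * standard MII requests (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG), so PHY
 * registers can be inspected from userspace with e.g. mii-tool.
 */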
2200
2201	/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2202static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2203					    int entry)
2204{
2205	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2206}
2207
2208static u32 sh_eth_tsu_get_post_mask(int entry)
2209{
2210	return 0x0f << (28 - ((entry % 8) * 4));
2211}
2212
2213static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2214{
2215	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2216}
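
/* Worked example for the helpers above: each TSU_POSTn register packs
 * eight CAM entries, one nibble each, entry 0 in the top nibble.  For
 * entry 5 on port 1:
 *	offset = TSU_POST1 + (5 / 8) * 4		-> TSU_POST1
 *	mask   = 0x0f << (28 - (5 % 8) * 4)		-> 0x00000f00
 *	bit    = (0x08 >> (1 << 1)) << (28 - 20)	-> 0x00000200
 * Port 0 thus owns the high half of each nibble (0x8), port 1 the low
 * half (0x2).
 */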
2217
2218static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2219					     int entry)
2220{
2221	struct sh_eth_private *mdp = netdev_priv(ndev);
2222	u32 tmp;
2223	void *reg_offset;
2224
2225	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2226	tmp = ioread32(reg_offset);
2227	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2228}
2229
2230static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2231					      int entry)
2232{
2233	struct sh_eth_private *mdp = netdev_priv(ndev);
2234	u32 post_mask, ref_mask, tmp;
2235	void *reg_offset;
2236
2237	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2238	post_mask = sh_eth_tsu_get_post_mask(entry);
2239	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2240
2241	tmp = ioread32(reg_offset);
2242	iowrite32(tmp & ~post_mask, reg_offset);
2243
2244	/* Return "true" if the other port still has this entry enabled */
2245	return tmp & ref_mask;
2246}
2247
2248static int sh_eth_tsu_busy(struct net_device *ndev)
2249{
2250	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2251	struct sh_eth_private *mdp = netdev_priv(ndev);
2252
2253	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2254		udelay(10);
2255		timeout--;
2256		if (timeout <= 0) {
2257			netdev_err(ndev, "%s: timeout\n", __func__);
2258			return -ETIMEDOUT;
2259		}
2260	}
2261
2262	return 0;
2263}
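
/* The poll above waits in 10 us steps for SH_ETH_TSU_TIMEOUT_MS * 100
 * iterations, i.e. SH_ETH_TSU_TIMEOUT_MS milliseconds in total, before
 * giving up with -ETIMEDOUT.
 */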
2264
2265static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2266				  const u8 *addr)
2267{
2268	u32 val;
2269
2270	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2271	iowrite32(val, reg);
2272	if (sh_eth_tsu_busy(ndev) < 0)
2273		return -EBUSY;
2274
2275	val = addr[4] << 8 | addr[5];
2276	iowrite32(val, reg + 4);
2277	if (sh_eth_tsu_busy(ndev) < 0)
2278		return -EBUSY;
2279
2280	return 0;
2281}
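
/* Each CAM entry is an ADRH/ADRL register pair spaced 8 bytes apart; e.g.
 * writing 00:11:22:33:44:55 stores 0x00112233 in TSU_ADRHn and 0x00004455
 * in TSU_ADRLn (reg + 4), with a TSU_ADSBSY busy-wait after each half.
 */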
2282
2283static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2284{
2285	u32 val;
2286
2287	val = ioread32(reg);
2288	addr[0] = (val >> 24) & 0xff;
2289	addr[1] = (val >> 16) & 0xff;
2290	addr[2] = (val >> 8) & 0xff;
2291	addr[3] = val & 0xff;
2292	val = ioread32(reg + 4);
2293	addr[4] = (val >> 8) & 0xff;
2294	addr[5] = val & 0xff;
2295}
2296
2297
2298static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2299{
2300	struct sh_eth_private *mdp = netdev_priv(ndev);
2301	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2302	int i;
2303	u8 c_addr[ETH_ALEN];
2304
2305	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2306		sh_eth_tsu_read_entry(reg_offset, c_addr);
2307		if (ether_addr_equal(addr, c_addr))
2308			return i;
2309	}
2310
2311	return -ENOENT;
2312}
2313
2314static int sh_eth_tsu_find_empty(struct net_device *ndev)
2315{
2316	u8 blank[ETH_ALEN];
2317	int entry;
2318
2319	memset(blank, 0, sizeof(blank));
2320	entry = sh_eth_tsu_find_entry(ndev, blank);
2321	return (entry < 0) ? -ENOMEM : entry;
2322}
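
/* An all-zeroes address marks a free CAM slot, so failing to find one
 * means every entry of the table (one TSU_TEN bit per entry) is in use.
 */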
2323
2324static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2325					      int entry)
2326{
2327	struct sh_eth_private *mdp = netdev_priv(ndev);
2328	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2329	int ret;
2330	u8 blank[ETH_ALEN];
2331
2332	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2333			 ~(1 << (31 - entry)), TSU_TEN);
2334
2335	memset(blank, 0, sizeof(blank));
2336	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2337	if (ret < 0)
2338		return ret;
2339	return 0;
2340}
2341
2342static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2343{
2344	struct sh_eth_private *mdp = netdev_priv(ndev);
2345	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2346	int i, ret;
2347
2348	if (!mdp->cd->tsu)
2349		return 0;
2350
2351	i = sh_eth_tsu_find_entry(ndev, addr);
2352	if (i < 0) {
2353		/* No entry found, create one */
2354		i = sh_eth_tsu_find_empty(ndev);
2355		if (i < 0)
2356			return -ENOMEM;
2357		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2358		if (ret < 0)
2359			return ret;
2360
2361		/* Enable the entry */
2362		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2363				 (1 << (31 - i)), TSU_TEN);
2364	}
2365
2366	/* Entry found or created, enable POST */
2367	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2368
2369	return 0;
2370}
2371
2372static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2373{
2374	struct sh_eth_private *mdp = netdev_priv(ndev);
2375	int i, ret;
2376
2377	if (!mdp->cd->tsu)
2378		return 0;
2379
2380	i = sh_eth_tsu_find_entry(ndev, addr);
2381	if (i >= 0) {
2382		/* Entry found */
2383		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2384			goto done;
2385
2386		/* Disable the entry if neither port is using it */
2387		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2388		if (ret < 0)
2389			return ret;
2390	}
2391done:
2392	return 0;
2393}
2394
2395static int sh_eth_tsu_purge_all(struct net_device *ndev)
2396{
2397	struct sh_eth_private *mdp = netdev_priv(ndev);
2398	int i, ret;
2399
2400	if (unlikely(!mdp->cd->tsu))
2401		return 0;
2402
2403	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2404		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2405			continue;
2406
2407		/* Disable the entry if neither port is using it */
2408		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2409		if (ret < 0)
2410			return ret;
2411	}
2412
2413	return 0;
2414}
2415
2416static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2417{
2418	struct sh_eth_private *mdp = netdev_priv(ndev);
2419	u8 addr[ETH_ALEN];
2420	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2421	int i;
2422
2423	if (unlikely(!mdp->cd->tsu))
2424		return;
2425
2426	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2427		sh_eth_tsu_read_entry(reg_offset, addr);
2428		if (is_multicast_ether_addr(addr))
2429			sh_eth_tsu_del_entry(ndev, addr);
2430	}
2431}
2432
2433/* Set up the multicast reception mode */
2434static void sh_eth_set_multicast_list(struct net_device *ndev)
2435{
2436	struct sh_eth_private *mdp = netdev_priv(ndev);
2437	u32 ecmr_bits;
2438	int mcast_all = 0;
2439	unsigned long flags;
2440
2441	spin_lock_irqsave(&mdp->lock, flags);
2442	/* Initial condition is MCT = 1, PRM = 0.
2443	 * Depending on ndev->flags, set PRM or clear MCT
2444	 */
2445	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
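	/* Per the flag handling below: keeping MCT set leaves multicast
	 * filtering to the TSU's CAM entries, clearing MCT accepts all
	 * multicast, and setting PRM makes the MAC fully promiscuous.
	 */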
2446
2447	if (!(ndev->flags & IFF_MULTICAST)) {
2448		sh_eth_tsu_purge_mcast(ndev);
2449		mcast_all = 1;
2450	}
2451	if (ndev->flags & IFF_ALLMULTI) {
2452		sh_eth_tsu_purge_mcast(ndev);
2453		ecmr_bits &= ~ECMR_MCT;
2454		mcast_all = 1;
2455	}
2456
2457	if (ndev->flags & IFF_PROMISC) {
2458		sh_eth_tsu_purge_all(ndev);
2459		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2460	} else if (mdp->cd->tsu) {
2461		struct netdev_hw_addr *ha;
2462		netdev_for_each_mc_addr(ha, ndev) {
2463			if (mcast_all && is_multicast_ether_addr(ha->addr))
2464				continue;
2465
2466			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2467				if (!mcast_all) {
2468					sh_eth_tsu_purge_mcast(ndev);
2469					ecmr_bits &= ~ECMR_MCT;
2470					mcast_all = 1;
2471				}
2472			}
2473		}
2474	} else {
2475		/* Normal, unicast/broadcast-only mode. */
2476		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2477	}
2478
2479	/* update the ethernet mode */
2480	sh_eth_write(ndev, ecmr_bits, ECMR);
2481
2482	spin_unlock_irqrestore(&mdp->lock, flags);
2483}
2484
2485static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2486{
2487	if (!mdp->port)
2488		return TSU_VTAG0;
2489	else
2490		return TSU_VTAG1;
2491}
2492
2493static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2494				  __be16 proto, u16 vid)
2495{
2496	struct sh_eth_private *mdp = netdev_priv(ndev);
2497	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2498
2499	if (unlikely(!mdp->cd->tsu))
2500		return -EPERM;
2501
2502	/* No filtering if vid = 0 */
2503	if (!vid)
2504		return 0;
2505
2506	mdp->vlan_num_ids++;
2507
2508	/* The controller has one VLAN tag HW filter. So, if the filter is
2509	 * already enabled, the driver disables it and lets all VLAN IDs through.
2510	 */
2511	if (mdp->vlan_num_ids > 1) {
2512		/* disable VLAN filter */
2513		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2514		return 0;
2515	}
2516
2517	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2518			 vtag_reg_index);
2519
2520	return 0;
2521}
2522
2523static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2524				   __be16 proto, u16 vid)
2525{
2526	struct sh_eth_private *mdp = netdev_priv(ndev);
2527	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2528
2529	if (unlikely(!mdp->cd->tsu))
2530		return -EPERM;
2531
2532	/* No filtering if vid = 0 */
2533	if (!vid)
2534		return 0;
2535
2536	mdp->vlan_num_ids--;
2537	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2538
2539	return 0;
2540}
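
/* Note the one-filter limitation above: adding a second VID disables the
 * hardware filter, and deleting any VID clears the VTAG register even if
 * another VID is still registered, leaving VLAN filtering to the stack.
 */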
2541
2542/* SuperH's TSU register init function */
2543static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2544{
2545	if (sh_eth_is_rz_fast_ether(mdp)) {
2546		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2547		return;
2548	}
2549
2550	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
2551	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
2552	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
2553	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2554	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2555	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2556	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2557	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2558	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2559	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2560	if (sh_eth_is_gether(mdp)) {
2561		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
2562		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
2563	} else {
2564		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
2565		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
2566	}
2567	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
2568	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
2569	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
2570	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
2571	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
2572	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
2573	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
2574}
2575
2576/* MDIO bus release function */
2577static int sh_mdio_release(struct sh_eth_private *mdp)
2578{
2579	/* unregister mdio bus */
2580	mdiobus_unregister(mdp->mii_bus);
2581
2582	/* free bitbang info */
2583	free_mdio_bitbang(mdp->mii_bus);
2584
2585	return 0;
2586}
2587
2588/* MDIO bus init function */
2589static int sh_mdio_init(struct sh_eth_private *mdp,
2590			struct sh_eth_plat_data *pd)
2591{
2592	int ret, i;
2593	struct bb_info *bitbang;
2594	struct platform_device *pdev = mdp->pdev;
2595	struct device *dev = &mdp->pdev->dev;
2596
2597	/* create bit control struct for PHY */
2598	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2599	if (!bitbang)
2600		return -ENOMEM;
2601
2602	/* bitbang init */
2603	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2604	bitbang->set_gate = pd->set_mdio_gate;
2605	bitbang->mdi_msk = PIR_MDI;
2606	bitbang->mdo_msk = PIR_MDO;
2607	bitbang->mmd_msk = PIR_MMD;
2608	bitbang->mdc_msk = PIR_MDC;
2609	bitbang->ctrl.ops = &bb_ops;
2610
2611	/* MII controller setting */
2612	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2613	if (!mdp->mii_bus)
2614		return -ENOMEM;
2615
2616	/* Hook up MII support for ethtool */
2617	mdp->mii_bus->name = "sh_mii";
2618	mdp->mii_bus->parent = dev;
2619	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2620		 pdev->name, pdev->id);
2621
2622	/* PHY IRQ */
2623	mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
2624					 GFP_KERNEL);
2625	if (!mdp->mii_bus->irq) {
2626		ret = -ENOMEM;
2627		goto out_free_bus;
2628	}
2629
2630	/* register MDIO bus */
2631	if (dev->of_node) {
2632		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2633	} else {
2634		for (i = 0; i < PHY_MAX_ADDR; i++)
2635			mdp->mii_bus->irq[i] = PHY_POLL;
2636		if (pd->phy_irq > 0)
2637			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2638
2639		ret = mdiobus_register(mdp->mii_bus);
2640	}
2641
2642	if (ret)
2643		goto out_free_bus;
2644
2645	return 0;
2646
2647out_free_bus:
2648	free_mdio_bitbang(mdp->mii_bus);
2649	return ret;
2650}
2651
2652static const u16 *sh_eth_get_register_offset(int register_type)
2653{
2654	const u16 *reg_offset = NULL;
2655
2656	switch (register_type) {
2657	case SH_ETH_REG_GIGABIT:
2658		reg_offset = sh_eth_offset_gigabit;
2659		break;
2660	case SH_ETH_REG_FAST_RZ:
2661		reg_offset = sh_eth_offset_fast_rz;
2662		break;
2663	case SH_ETH_REG_FAST_RCAR:
2664		reg_offset = sh_eth_offset_fast_rcar;
2665		break;
2666	case SH_ETH_REG_FAST_SH4:
2667		reg_offset = sh_eth_offset_fast_sh4;
2668		break;
2669	case SH_ETH_REG_FAST_SH3_SH2:
2670		reg_offset = sh_eth_offset_fast_sh3_sh2;
2671		break;
2672	default:
2673		break;
2674	}
2675
2676	return reg_offset;
2677}
2678
2679static const struct net_device_ops sh_eth_netdev_ops = {
2680	.ndo_open		= sh_eth_open,
2681	.ndo_stop		= sh_eth_close,
2682	.ndo_start_xmit		= sh_eth_start_xmit,
2683	.ndo_get_stats		= sh_eth_get_stats,
2684	.ndo_tx_timeout		= sh_eth_tx_timeout,
2685	.ndo_do_ioctl		= sh_eth_do_ioctl,
2686	.ndo_validate_addr	= eth_validate_addr,
2687	.ndo_set_mac_address	= eth_mac_addr,
2688	.ndo_change_mtu		= eth_change_mtu,
2689};
2690
2691static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2692	.ndo_open		= sh_eth_open,
2693	.ndo_stop		= sh_eth_close,
2694	.ndo_start_xmit		= sh_eth_start_xmit,
2695	.ndo_get_stats		= sh_eth_get_stats,
2696	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
2697	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
2698	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
2699	.ndo_tx_timeout		= sh_eth_tx_timeout,
2700	.ndo_do_ioctl		= sh_eth_do_ioctl,
2701	.ndo_validate_addr	= eth_validate_addr,
2702	.ndo_set_mac_address	= eth_mac_addr,
2703	.ndo_change_mtu		= eth_change_mtu,
2704};
2705
2706#ifdef CONFIG_OF
2707static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2708{
2709	struct device_node *np = dev->of_node;
2710	struct sh_eth_plat_data *pdata;
2711	const char *mac_addr;
2712
2713	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2714	if (!pdata)
2715		return NULL;
2716
2717	pdata->phy_interface = of_get_phy_mode(np);
2718
2719	mac_addr = of_get_mac_address(np);
2720	if (mac_addr)
2721		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2722
2723	pdata->no_ether_link =
2724		of_property_read_bool(np, "renesas,no-ether-link");
2725	pdata->ether_link_active_low =
2726		of_property_read_bool(np, "renesas,ether-link-active-low");
2727
2728	return pdata;
2729}
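
/* A hypothetical device-tree node exercising the properties parsed above:
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7790";
 *		reg = <0 0xee700000 0 0x400>;
 *		phy-mode = "rmii";
 *		local-mac-address = [ 00 11 22 33 44 55 ];
 *		renesas,ether-link-active-low;
 *	};
 */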
2730
2731static const struct of_device_id sh_eth_match_table[] = {
2732	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2733	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2734	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2735	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2736	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2737	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2738	{ }
2739};
2740MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2741#else
2742static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2743{
2744	return NULL;
2745}
2746#endif
2747
2748static int sh_eth_drv_probe(struct platform_device *pdev)
2749{
2750	int ret, devno = 0;
2751	struct resource *res;
2752	struct net_device *ndev = NULL;
2753	struct sh_eth_private *mdp = NULL;
2754	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2755	const struct platform_device_id *id = platform_get_device_id(pdev);
2756
2757	/* get base addr */
2758	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2759	if (unlikely(res == NULL)) {
2760		dev_err(&pdev->dev, "invalid resource\n");
2761		return -EINVAL;
2762	}
2763
2764	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2765	if (!ndev)
2766		return -ENOMEM;
2767
2768	pm_runtime_enable(&pdev->dev);
2769	pm_runtime_get_sync(&pdev->dev);
2770
2771	/* The sh Ether-specific entries in the device structure. */
2772	ndev->base_addr = res->start;
2773	devno = pdev->id;
2774	if (devno < 0)
2775		devno = 0;
2776
2777	ndev->dma = -1;
2778	ret = platform_get_irq(pdev, 0);
2779	if (ret < 0) {
2780		ret = -ENODEV;
2781		goto out_release;
2782	}
2783	ndev->irq = ret;
2784
2785	SET_NETDEV_DEV(ndev, &pdev->dev);
2786
2787	mdp = netdev_priv(ndev);
2788	mdp->num_tx_ring = TX_RING_SIZE;
2789	mdp->num_rx_ring = RX_RING_SIZE;
2790	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2791	if (IS_ERR(mdp->addr)) {
2792		ret = PTR_ERR(mdp->addr);
2793		goto out_release;
2794	}
2795
2796	spin_lock_init(&mdp->lock);
2797	mdp->pdev = pdev;
2798
2799	if (pdev->dev.of_node)
2800		pd = sh_eth_parse_dt(&pdev->dev);
2801	if (!pd) {
2802		dev_err(&pdev->dev, "no platform data\n");
2803		ret = -EINVAL;
2804		goto out_release;
2805	}
2806
2807	/* get PHY ID */
2808	mdp->phy_id = pd->phy;
2809	mdp->phy_interface = pd->phy_interface;
2810	/* EDMAC endian */
2811	mdp->edmac_endian = pd->edmac_endian;
2812	mdp->no_ether_link = pd->no_ether_link;
2813	mdp->ether_link_active_low = pd->ether_link_active_low;
2814
2815	/* set cpu data */
2816	if (id) {
2817		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2818	} else	{
2819		const struct of_device_id *match;
2820
2821		match = of_match_device(of_match_ptr(sh_eth_match_table),
2822					&pdev->dev);
2823		mdp->cd = (struct sh_eth_cpu_data *)match->data;
2824	}
2825	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2826	if (!mdp->reg_offset) {
2827		dev_err(&pdev->dev, "Unknown register type (%d)\n",
2828			mdp->cd->register_type);
2829		ret = -EINVAL;
2830		goto out_release;
2831	}
2832	sh_eth_set_default_cpu_data(mdp->cd);
2833
2834	/* set function */
2835	if (mdp->cd->tsu)
2836		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2837	else
2838		ndev->netdev_ops = &sh_eth_netdev_ops;
2839	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2840	ndev->watchdog_timeo = TX_TIMEOUT;
2841
2842	/* debug message level */
2843	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2844
2845	/* read and set MAC address */
2846	read_mac_address(ndev, pd->mac_addr);
2847	if (!is_valid_ether_addr(ndev->dev_addr)) {
2848		dev_warn(&pdev->dev,
2849			 "no valid MAC address supplied, using a random one.\n");
2850		eth_hw_addr_random(ndev);
2851	}
2852
2853	/* ioremap the TSU registers */
2854	if (mdp->cd->tsu) {
2855		struct resource *rtsu;
2856		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2857		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2858		if (IS_ERR(mdp->tsu_addr)) {
2859			ret = PTR_ERR(mdp->tsu_addr);
2860			goto out_release;
2861		}
2862		mdp->port = devno % 2;
2863		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
2864	}
2865
2866	/* initialize the first device, or any device that requires init */
2867	if (!devno || pd->needs_init) {
2868		if (mdp->cd->chip_reset)
2869			mdp->cd->chip_reset(ndev);
2870
2871		if (mdp->cd->tsu) {
2872			/* TSU init (one-time setup) */
2873			sh_eth_tsu_init(mdp);
2874		}
2875	}
2876
2877	/* MDIO bus init */
2878	ret = sh_mdio_init(mdp, pd);
2879	if (ret) {
2880		dev_err(&ndev->dev, "failed to initialise MDIO\n");
2881		goto out_release;
2882	}
2883
2884	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2885
2886	/* network device register */
2887	ret = register_netdev(ndev);
2888	if (ret)
2889		goto out_napi_del;
2890
2891	/* print device information */
2892	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
2893		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2894
2895	pm_runtime_put(&pdev->dev);
2896	platform_set_drvdata(pdev, ndev);
2897
2898	return ret;
2899
2900out_napi_del:
2901	netif_napi_del(&mdp->napi);
2902	sh_mdio_release(mdp);
2903
2904out_release:
2905	/* net_dev free */
2906	if (ndev)
2907		free_netdev(ndev);
2908
2909	pm_runtime_put(&pdev->dev);
2910	pm_runtime_disable(&pdev->dev);
2911	return ret;
2912}
2913
2914static int sh_eth_drv_remove(struct platform_device *pdev)
2915{
2916	struct net_device *ndev = platform_get_drvdata(pdev);
2917	struct sh_eth_private *mdp = netdev_priv(ndev);
2918
2919	unregister_netdev(ndev);
2920	netif_napi_del(&mdp->napi);
2921	sh_mdio_release(mdp);
2922	pm_runtime_disable(&pdev->dev);
2923	free_netdev(ndev);
2924
2925	return 0;
2926}
2927
2928#ifdef CONFIG_PM
2929static int sh_eth_runtime_nop(struct device *dev)
2930{
2931	/* Runtime PM callback shared between ->runtime_suspend()
2932	 * and ->runtime_resume(). Simply returns success.
2933	 *
2934	 * This driver re-initializes all registers after
2935	 * pm_runtime_get_sync() anyway so there is no need
2936	 * to save and restore registers here.
2937	 */
2938	return 0;
2939}
2940
2941static const struct dev_pm_ops sh_eth_dev_pm_ops = {
2942	.runtime_suspend = sh_eth_runtime_nop,
2943	.runtime_resume = sh_eth_runtime_nop,
2944};
2945#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
2946#else
2947#define SH_ETH_PM_OPS NULL
2948#endif
2949
2950static struct platform_device_id sh_eth_id_table[] = {
2951	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
2952	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
2953	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
2954	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
2955	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
2956	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
2957	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2958	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
2959	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2960	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2961	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
2962	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
2963	{ }
2964};
2965MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
2966
2967static struct platform_driver sh_eth_driver = {
2968	.probe = sh_eth_drv_probe,
2969	.remove = sh_eth_drv_remove,
2970	.id_table = sh_eth_id_table,
2971	.driver = {
2972		   .name = CARDNAME,
2973		   .pm = SH_ETH_PM_OPS,
2974		   .of_match_table = of_match_ptr(sh_eth_match_table),
2975	},
2976};
2977
2978module_platform_driver(sh_eth_driver);
2979
2980MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2981MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2982MODULE_LICENSE("GPL v2");
2983