sh_eth.c revision 91c77550000a7d888aaf9f9ac13e3e3485d18560
1/*
2 *  SuperH Ethernet device driver
3 *
4 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5 *  Copyright (C) 2008-2012 Renesas Solutions Corp.
6 *
7 *  This program is free software; you can redistribute it and/or modify it
8 *  under the terms and conditions of the GNU General Public License,
9 *  version 2, as published by the Free Software Foundation.
10 *
11 *  This program is distributed in the hope it will be useful, but WITHOUT
12 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14 *  more details.
15 *  You should have received a copy of the GNU General Public License along with
16 *  this program; if not, write to the Free Software Foundation, Inc.,
17 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *  The full GNU General Public License is included in this distribution in
20 *  the file called "COPYING".
21 */
22
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
27#include <linux/interrupt.h>
28#include <linux/dma-mapping.h>
29#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/platform_device.h>
32#include <linux/mdio-bitbang.h>
33#include <linux/netdevice.h>
34#include <linux/phy.h>
35#include <linux/cache.h>
36#include <linux/io.h>
37#include <linux/pm_runtime.h>
38#include <linux/slab.h>
39#include <linux/ethtool.h>
40#include <linux/if_vlan.h>
41#include <linux/clk.h>
42#include <linux/sh_eth.h>
43
44#include "sh_eth.h"
45
46#define SH_ETH_DEF_MSG_ENABLE \
47		(NETIF_MSG_LINK	| \
48		NETIF_MSG_TIMER	| \
49		NETIF_MSG_RX_ERR| \
50		NETIF_MSG_TX_ERR)
51
52#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
53	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
54	defined(CONFIG_ARCH_R8A7740)
55static void sh_eth_select_mii(struct net_device *ndev)
56{
57	u32 value = 0x0;
58	struct sh_eth_private *mdp = netdev_priv(ndev);
59
60	switch (mdp->phy_interface) {
61	case PHY_INTERFACE_MODE_GMII:
62		value = 0x2;
63		break;
64	case PHY_INTERFACE_MODE_MII:
65		value = 0x1;
66		break;
67	case PHY_INTERFACE_MODE_RMII:
68		value = 0x0;
69		break;
70	default:
71		pr_warn("PHY interface mode is not set up. Defaulting to MII.\n");
72		value = 0x1;
73		break;
74	}
75
76	sh_eth_write(ndev, value, RMII_MII);
77}
78#endif
79
80/* There is CPU dependent code */
81#if defined(CONFIG_CPU_SUBTYPE_SH7724)
82#define SH_ETH_RESET_DEFAULT	1
83static void sh_eth_set_duplex(struct net_device *ndev)
84{
85	struct sh_eth_private *mdp = netdev_priv(ndev);
86
87	if (mdp->duplex) /* Full */
88		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
89	else		/* Half */
90		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
91}
92
93static void sh_eth_set_rate(struct net_device *ndev)
94{
95	struct sh_eth_private *mdp = netdev_priv(ndev);
96
97	switch (mdp->speed) {
98	case 10: /* 10BASE */
99		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
100		break;
101	case 100:/* 100BASE */
102		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
103		break;
104	default:
105		break;
106	}
107}
108
109/* SH7724 */
110static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
111	.set_duplex	= sh_eth_set_duplex,
112	.set_rate	= sh_eth_set_rate,
113
114	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
115	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
116	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
117
118	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
119	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
120			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
121	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
122
123	.apr		= 1,
124	.mpr		= 1,
125	.tpauser	= 1,
126	.hw_swap	= 1,
127	.rpadir		= 1,
128	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
129};
130#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
131#define SH_ETH_HAS_BOTH_MODULES	1
132#define SH_ETH_HAS_TSU	1
133static int sh_eth_check_reset(struct net_device *ndev);
134
135static void sh_eth_set_duplex(struct net_device *ndev)
136{
137	struct sh_eth_private *mdp = netdev_priv(ndev);
138
139	if (mdp->duplex) /* Full */
140		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
141	else		/* Half */
142		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
143}
144
145static void sh_eth_set_rate(struct net_device *ndev)
146{
147	struct sh_eth_private *mdp = netdev_priv(ndev);
148
149	switch (mdp->speed) {
150	case 10: /* 10BASE */
151		sh_eth_write(ndev, 0, RTRATE);
152		break;
153	case 100:/* 100BASE */
154		sh_eth_write(ndev, 1, RTRATE);
155		break;
156	default:
157		break;
158	}
159}
160
161/* SH7757 */
162static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
163	.set_duplex		= sh_eth_set_duplex,
164	.set_rate		= sh_eth_set_rate,
165
166	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
167	.rmcr_value	= 0x00000001,
168
169	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
170	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
171			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
172	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
173
174	.apr		= 1,
175	.mpr		= 1,
176	.tpauser	= 1,
177	.hw_swap	= 1,
178	.no_ade		= 1,
179	.rpadir		= 1,
180	.rpadir_value   = 2 << 16,
181};
182
183#define SH_GIGA_ETH_BASE	0xfee00000
184#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
185#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
186static void sh_eth_chip_reset_giga(struct net_device *ndev)
187{
188	int i;
189	unsigned long mahr[2], malr[2];
190
191	/* save MAHR and MALR */
192	for (i = 0; i < 2; i++) {
193		malr[i] = ioread32((void *)GIGA_MALR(i));
194		mahr[i] = ioread32((void *)GIGA_MAHR(i));
195	}
196
197	/* reset device */
198	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
199	mdelay(1);
200
201	/* restore MAHR and MALR */
202	for (i = 0; i < 2; i++) {
203		iowrite32(malr[i], (void *)GIGA_MALR(i));
204		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
205	}
206}
207
208static int sh_eth_is_gether(struct sh_eth_private *mdp);
209static int sh_eth_reset(struct net_device *ndev)
210{
211	struct sh_eth_private *mdp = netdev_priv(ndev);
212	int ret = 0;
213
214	if (sh_eth_is_gether(mdp)) {
215		sh_eth_write(ndev, 0x03, EDSR);
216		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
217				EDMR);
218
219		ret = sh_eth_check_reset(ndev);
220		if (ret)
221			goto out;
222
223		/* Table Init */
224		sh_eth_write(ndev, 0x0, TDLAR);
225		sh_eth_write(ndev, 0x0, TDFAR);
226		sh_eth_write(ndev, 0x0, TDFXR);
227		sh_eth_write(ndev, 0x0, TDFFR);
228		sh_eth_write(ndev, 0x0, RDLAR);
229		sh_eth_write(ndev, 0x0, RDFAR);
230		sh_eth_write(ndev, 0x0, RDFXR);
231		sh_eth_write(ndev, 0x0, RDFFR);
232	} else {
233		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
234				EDMR);
235		mdelay(3);
236		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
237				EDMR);
238	}
239
240out:
241	return ret;
242}
243
244static void sh_eth_set_duplex_giga(struct net_device *ndev)
245{
246	struct sh_eth_private *mdp = netdev_priv(ndev);
247
248	if (mdp->duplex) /* Full */
249		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
250	else		/* Half */
251		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
252}
253
254static void sh_eth_set_rate_giga(struct net_device *ndev)
255{
256	struct sh_eth_private *mdp = netdev_priv(ndev);
257
258	switch (mdp->speed) {
259	case 10: /* 10BASE */
260		sh_eth_write(ndev, 0x00000000, GECMR);
261		break;
262	case 100:/* 100BASE */
263		sh_eth_write(ndev, 0x00000010, GECMR);
264		break;
265	case 1000: /* 1000BASE */
266		sh_eth_write(ndev, 0x00000020, GECMR);
267		break;
268	default:
269		break;
270	}
271}
272
273/* SH7757(GETHERC) */
274static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
275	.chip_reset	= sh_eth_chip_reset_giga,
276	.set_duplex	= sh_eth_set_duplex_giga,
277	.set_rate	= sh_eth_set_rate_giga,
278
279	.ecsr_value	= ECSR_ICD | ECSR_MPD,
280	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
281	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
282
283	.tx_check	= EESR_TC1 | EESR_FTC,
284	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
285			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
286			  EESR_ECI,
287	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
288			  EESR_TFE,
289	.fdr_value	= 0x0000072f,
290	.rmcr_value	= 0x00000001,
291
292	.apr		= 1,
293	.mpr		= 1,
294	.tpauser	= 1,
295	.bculr		= 1,
296	.hw_swap	= 1,
297	.rpadir		= 1,
298	.rpadir_value   = 2 << 16,
299	.no_trimd	= 1,
300	.no_ade		= 1,
301	.tsu		= 1,
302};
303
304static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
305{
306	if (sh_eth_is_gether(mdp))
307		return &sh_eth_my_cpu_data_giga;
308	else
309		return &sh_eth_my_cpu_data;
310}
311
312#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
313#define SH_ETH_HAS_TSU	1
314static int sh_eth_check_reset(struct net_device *ndev);
315static void sh_eth_reset_hw_crc(struct net_device *ndev);
316
317static void sh_eth_chip_reset(struct net_device *ndev)
318{
319	struct sh_eth_private *mdp = netdev_priv(ndev);
320
321	/* reset device */
322	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
323	mdelay(1);
324}
325
326static void sh_eth_set_duplex(struct net_device *ndev)
327{
328	struct sh_eth_private *mdp = netdev_priv(ndev);
329
330	if (mdp->duplex) /* Full */
331		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
332	else		/* Half */
333		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
334}
335
336static void sh_eth_set_rate(struct net_device *ndev)
337{
338	struct sh_eth_private *mdp = netdev_priv(ndev);
339
340	switch (mdp->speed) {
341	case 10: /* 10BASE */
342		sh_eth_write(ndev, GECMR_10, GECMR);
343		break;
344	case 100:/* 100BASE */
345		sh_eth_write(ndev, GECMR_100, GECMR);
346		break;
347	case 1000: /* 1000BASE */
348		sh_eth_write(ndev, GECMR_1000, GECMR);
349		break;
350	default:
351		break;
352	}
353}
354
355/* sh7763 */
356static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
357	.chip_reset	= sh_eth_chip_reset,
358	.set_duplex	= sh_eth_set_duplex,
359	.set_rate	= sh_eth_set_rate,
360
361	.ecsr_value	= ECSR_ICD | ECSR_MPD,
362	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
363	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
364
365	.tx_check	= EESR_TC1 | EESR_FTC,
366	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
367			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
368			  EESR_ECI,
369	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
370			  EESR_TFE,
371
372	.apr		= 1,
373	.mpr		= 1,
374	.tpauser	= 1,
375	.bculr		= 1,
376	.hw_swap	= 1,
377	.no_trimd	= 1,
378	.no_ade		= 1,
379	.tsu		= 1,
380#if defined(CONFIG_CPU_SUBTYPE_SH7734)
381	.hw_crc     = 1,
382	.select_mii = 1,
383#endif
384};
385
386static int sh_eth_reset(struct net_device *ndev)
387{
388	int ret = 0;
389
390	sh_eth_write(ndev, EDSR_ENALL, EDSR);
391	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
392
393	ret = sh_eth_check_reset(ndev);
394	if (ret)
395		goto out;
396
397	/* Table Init */
398	sh_eth_write(ndev, 0x0, TDLAR);
399	sh_eth_write(ndev, 0x0, TDFAR);
400	sh_eth_write(ndev, 0x0, TDFXR);
401	sh_eth_write(ndev, 0x0, TDFFR);
402	sh_eth_write(ndev, 0x0, RDLAR);
403	sh_eth_write(ndev, 0x0, RDFAR);
404	sh_eth_write(ndev, 0x0, RDFXR);
405	sh_eth_write(ndev, 0x0, RDFFR);
406
407	/* Reset HW CRC register */
408	sh_eth_reset_hw_crc(ndev);
409
410	/* Select MII mode */
411	if (sh_eth_my_cpu_data.select_mii)
412		sh_eth_select_mii(ndev);
413out:
414	return ret;
415}
416
417static void sh_eth_reset_hw_crc(struct net_device *ndev)
418{
419	if (sh_eth_my_cpu_data.hw_crc)
420		sh_eth_write(ndev, 0x0, CSMR);
421}
422
423#elif defined(CONFIG_ARCH_R8A7740)
424#define SH_ETH_HAS_TSU	1
425static int sh_eth_check_reset(struct net_device *ndev);
426
427static void sh_eth_chip_reset(struct net_device *ndev)
428{
429	struct sh_eth_private *mdp = netdev_priv(ndev);
430
431	/* reset device */
432	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
433	mdelay(1);
434
435	sh_eth_select_mii(ndev);
436}
437
438static int sh_eth_reset(struct net_device *ndev)
439{
440	int ret = 0;
441
442	sh_eth_write(ndev, EDSR_ENALL, EDSR);
443	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
444
445	ret = sh_eth_check_reset(ndev);
446	if (ret)
447		goto out;
448
449	/* Table Init */
450	sh_eth_write(ndev, 0x0, TDLAR);
451	sh_eth_write(ndev, 0x0, TDFAR);
452	sh_eth_write(ndev, 0x0, TDFXR);
453	sh_eth_write(ndev, 0x0, TDFFR);
454	sh_eth_write(ndev, 0x0, RDLAR);
455	sh_eth_write(ndev, 0x0, RDFAR);
456	sh_eth_write(ndev, 0x0, RDFXR);
457	sh_eth_write(ndev, 0x0, RDFFR);
458
459out:
460	return ret;
461}
462
463static void sh_eth_set_duplex(struct net_device *ndev)
464{
465	struct sh_eth_private *mdp = netdev_priv(ndev);
466
467	if (mdp->duplex) /* Full */
468		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
469	else		/* Half */
470		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
471}
472
473static void sh_eth_set_rate(struct net_device *ndev)
474{
475	struct sh_eth_private *mdp = netdev_priv(ndev);
476
477	switch (mdp->speed) {
478	case 10: /* 10BASE */
479		sh_eth_write(ndev, GECMR_10, GECMR);
480		break;
481	case 100:/* 100BASE */
482		sh_eth_write(ndev, GECMR_100, GECMR);
483		break;
484	case 1000: /* 1000BASE */
485		sh_eth_write(ndev, GECMR_1000, GECMR);
486		break;
487	default:
488		break;
489	}
490}
491
492/* R8A7740 */
493static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
494	.chip_reset	= sh_eth_chip_reset,
495	.set_duplex	= sh_eth_set_duplex,
496	.set_rate	= sh_eth_set_rate,
497
498	.ecsr_value	= ECSR_ICD | ECSR_MPD,
499	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
500	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
501
502	.tx_check	= EESR_TC1 | EESR_FTC,
503	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
504			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
505			  EESR_ECI,
506	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
507			  EESR_TFE,
508
509	.apr		= 1,
510	.mpr		= 1,
511	.tpauser	= 1,
512	.bculr		= 1,
513	.hw_swap	= 1,
514	.no_trimd	= 1,
515	.no_ade		= 1,
516	.tsu		= 1,
517	.select_mii	= 1,
518};
519
520#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
521#define SH_ETH_RESET_DEFAULT	1
522static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
523	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
524
525	.apr		= 1,
526	.mpr		= 1,
527	.tpauser	= 1,
528	.hw_swap	= 1,
529};
530#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
531#define SH_ETH_RESET_DEFAULT	1
532#define SH_ETH_HAS_TSU	1
533static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
534	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
535	.tsu		= 1,
536};
537#endif
538
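/* Fill in defaults for any cpu_data fields the per-SoC definitions left unset */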
539static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
540{
541	if (!cd->ecsr_value)
542		cd->ecsr_value = DEFAULT_ECSR_INIT;
543
544	if (!cd->ecsipr_value)
545		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
546
547	if (!cd->fcftr_value)
548		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
549				  DEFAULT_FIFO_F_D_RFD;
550
551	if (!cd->fdr_value)
552		cd->fdr_value = DEFAULT_FDR_INIT;
553
554	if (!cd->rmcr_value)
555		cd->rmcr_value = DEFAULT_RMCR_VALUE;
556
557	if (!cd->tx_check)
558		cd->tx_check = DEFAULT_TX_CHECK;
559
560	if (!cd->eesr_err_check)
561		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
562
563	if (!cd->tx_error_check)
564		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
565}
566
567#if defined(SH_ETH_RESET_DEFAULT)
568/* Chip Reset */
569static int  sh_eth_reset(struct net_device *ndev)
570{
571	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
572	mdelay(3);
573	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
574
575	return 0;
576}
577#else
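/* Wait for the EDMR software-reset bits to self-clear (up to 100 ms) */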
578static int sh_eth_check_reset(struct net_device *ndev)
579{
580	int ret = 0;
581	int cnt = 100;
582
583	while (cnt > 0) {
584		if (!(sh_eth_read(ndev, EDMR) & 0x3))
585			break;
586		mdelay(1);
587		cnt--;
588	}
589	if (cnt <= 0) {
590		printk(KERN_ERR "Device reset failed\n");
591		ret = -ETIMEDOUT;
592	}
593	return ret;
594}
595#endif
596
597#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
598static void sh_eth_set_receive_align(struct sk_buff *skb)
599{
600	int reserve;
601
602	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
603	if (reserve)
604		skb_reserve(skb, reserve);
605}
606#else
607static void sh_eth_set_receive_align(struct sk_buff *skb)
608{
609	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
610}
611#endif
612
613
614/* CPU <-> EDMAC endian convert */
615static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
616{
617	switch (mdp->edmac_endian) {
618	case EDMAC_LITTLE_ENDIAN:
619		return cpu_to_le32(x);
620	case EDMAC_BIG_ENDIAN:
621		return cpu_to_be32(x);
622	}
623	return x;
624}
625
626static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
627{
628	switch (mdp->edmac_endian) {
629	case EDMAC_LITTLE_ENDIAN:
630		return le32_to_cpu(x);
631	case EDMAC_BIG_ENDIAN:
632		return be32_to_cpu(x);
633	}
634	return x;
635}
636
637/*
638 * Program the hardware MAC address from dev->dev_addr.
639 */
640static void update_mac_address(struct net_device *ndev)
641{
642	sh_eth_write(ndev,
643		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
644		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
645	sh_eth_write(ndev,
646		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
647}
648
649/*
650 * Get MAC address from SuperH MAC address register
651 *
652 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
653 * This driver picks up the MAC address that was set by the bootloader
654 * (U-Boot or sh-ipl+g), so the MAC address must be set there before use.
655 *
656 */
657static void read_mac_address(struct net_device *ndev, unsigned char *mac)
658{
659	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
660		memcpy(ndev->dev_addr, mac, 6);
661	} else {
662		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
663		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
664		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
665		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
666		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
667		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
668	}
669}
670
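/* Return 1 if the controller uses the gigabit (GETHER) register layout */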
671static int sh_eth_is_gether(struct sh_eth_private *mdp)
672{
673	if (mdp->reg_offset == sh_eth_offset_gigabit)
674		return 1;
675	else
676		return 0;
677}
678
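/* The EDTRR transmit-request value differs between ETHER and GETHER cores */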
679static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
680{
681	if (sh_eth_is_gether(mdp))
682		return EDTRR_TRNS_GETHER;
683	else
684		return EDTRR_TRNS_ETHER;
685}
686
687struct bb_info {
688	void (*set_gate)(void *addr);
689	struct mdiobb_ctrl ctrl;
690	void *addr;
691	u32 mmd_msk;/* MMD */
692	u32 mdo_msk;
693	u32 mdi_msk;
694	u32 mdc_msk;
695};
696
697/* PHY bit set */
698static void bb_set(void *addr, u32 msk)
699{
700	iowrite32(ioread32(addr) | msk, addr);
701}
702
703/* PHY bit clear */
704static void bb_clr(void *addr, u32 msk)
705{
706	iowrite32((ioread32(addr) & ~msk), addr);
707}
708
709/* PHY bit read */
710static int bb_read(void *addr, u32 msk)
711{
712	return (ioread32(addr) & msk) != 0;
713}
714
715/* Data I/O pin control */
716static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
717{
718	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
719
720	if (bitbang->set_gate)
721		bitbang->set_gate(bitbang->addr);
722
723	if (bit)
724		bb_set(bitbang->addr, bitbang->mmd_msk);
725	else
726		bb_clr(bitbang->addr, bitbang->mmd_msk);
727}
728
729/* Set bit data*/
730static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
731{
732	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
733
734	if (bitbang->set_gate)
735		bitbang->set_gate(bitbang->addr);
736
737	if (bit)
738		bb_set(bitbang->addr, bitbang->mdo_msk);
739	else
740		bb_clr(bitbang->addr, bitbang->mdo_msk);
741}
742
743/* Get bit data*/
744static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
745{
746	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
747
748	if (bitbang->set_gate)
749		bitbang->set_gate(bitbang->addr);
750
751	return bb_read(bitbang->addr, bitbang->mdi_msk);
752}
753
754/* MDC pin control */
755static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
756{
757	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
758
759	if (bitbang->set_gate)
760		bitbang->set_gate(bitbang->addr);
761
762	if (bit)
763		bb_set(bitbang->addr, bitbang->mdc_msk);
764	else
765		bb_clr(bitbang->addr, bitbang->mdc_msk);
766}
767
768/* mdio bus control struct */
769static struct mdiobb_ops bb_ops = {
770	.owner = THIS_MODULE,
771	.set_mdc = sh_mdc_ctrl,
772	.set_mdio_dir = sh_mmd_ctrl,
773	.set_mdio_data = sh_set_mdio,
774	.get_mdio_data = sh_get_mdio,
775};
776
777/* free skb and descriptor buffer */
778static void sh_eth_ring_free(struct net_device *ndev)
779{
780	struct sh_eth_private *mdp = netdev_priv(ndev);
781	int i;
782
783	/* Free Rx skb ringbuffer */
784	if (mdp->rx_skbuff) {
785		for (i = 0; i < RX_RING_SIZE; i++) {
786			if (mdp->rx_skbuff[i])
787				dev_kfree_skb(mdp->rx_skbuff[i]);
788		}
789	}
790	kfree(mdp->rx_skbuff);
791	mdp->rx_skbuff = NULL;
792
793	/* Free Tx skb ringbuffer */
794	if (mdp->tx_skbuff) {
795		for (i = 0; i < TX_RING_SIZE; i++) {
796			if (mdp->tx_skbuff[i])
797				dev_kfree_skb(mdp->tx_skbuff[i]);
798		}
799	}
800	kfree(mdp->tx_skbuff);
801	mdp->tx_skbuff = NULL;
802}
803
804/* format skb and descriptor buffer */
805static void sh_eth_ring_format(struct net_device *ndev)
806{
807	struct sh_eth_private *mdp = netdev_priv(ndev);
808	int i;
809	struct sk_buff *skb;
810	struct sh_eth_rxdesc *rxdesc = NULL;
811	struct sh_eth_txdesc *txdesc = NULL;
812	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
813	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
814
815	mdp->cur_rx = mdp->cur_tx = 0;
816	mdp->dirty_rx = mdp->dirty_tx = 0;
817
818	memset(mdp->rx_ring, 0, rx_ringsize);
819
820	/* build Rx ring buffer */
821	for (i = 0; i < RX_RING_SIZE; i++) {
822		/* skb */
823		mdp->rx_skbuff[i] = NULL;
824		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
825		mdp->rx_skbuff[i] = skb;
826		if (skb == NULL)
827			break;
828		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
829				DMA_FROM_DEVICE);
830		sh_eth_set_receive_align(skb);
831
832		/* RX descriptor */
833		rxdesc = &mdp->rx_ring[i];
834		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
835		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
836
837		/* The buffer length is aligned to a 16-byte boundary. */
838		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
839		/* Rx descriptor address set */
840		if (i == 0) {
841			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
842			if (sh_eth_is_gether(mdp))
843				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
844		}
845	}
846
847	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
848
849	/* Mark the last entry as wrapping the ring. */
850	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
851
852	memset(mdp->tx_ring, 0, tx_ringsize);
853
854	/* build Tx ring buffer */
855	for (i = 0; i < TX_RING_SIZE; i++) {
856		mdp->tx_skbuff[i] = NULL;
857		txdesc = &mdp->tx_ring[i];
858		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
859		txdesc->buffer_length = 0;
860		if (i == 0) {
861			/* Tx descriptor address set */
862			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
863			if (sh_eth_is_gether(mdp))
864				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
865		}
866	}
867
868	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
869}
870
871/* Get skb and descriptor buffer */
872static int sh_eth_ring_init(struct net_device *ndev)
873{
874	struct sh_eth_private *mdp = netdev_priv(ndev);
875	int rx_ringsize, tx_ringsize, ret = 0;
876
877	/*
878	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
879	 * card needs room to do 8 byte alignment, +2 so we can reserve
880	 * the first 2 bytes, and +16 gets room for the status word from the
881	 * card.
882	 */
883	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
884			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
885	if (mdp->cd->rpadir)
886		mdp->rx_buf_sz += NET_IP_ALIGN;
887
888	/* Allocate RX and TX skb rings */
889	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
890				GFP_KERNEL);
891	if (!mdp->rx_skbuff) {
892		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
893		ret = -ENOMEM;
894		return ret;
895	}
896
897	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
898				GFP_KERNEL);
899	if (!mdp->tx_skbuff) {
900		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
901		ret = -ENOMEM;
902		goto skb_ring_free;
903	}
904
905	/* Allocate all Rx descriptors. */
906	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
907	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
908			GFP_KERNEL);
909
910	if (!mdp->rx_ring) {
911		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
912			rx_ringsize);
913		ret = -ENOMEM;
914		goto desc_ring_free;
915	}
916
917	mdp->dirty_rx = 0;
918
919	/* Allocate all Tx descriptors. */
920	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
921	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
922			GFP_KERNEL);
923	if (!mdp->tx_ring) {
924		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
925			tx_ringsize);
926		ret = -ENOMEM;
927		goto desc_ring_free;
928	}
929	return ret;
930
931desc_ring_free:
932	/* free DMA buffer */
933	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
934
935skb_ring_free:
936	/* Free Rx and Tx skb ring buffer */
937	sh_eth_ring_free(ndev);
938	mdp->tx_ring = NULL;
939	mdp->rx_ring = NULL;
940
941	return ret;
942}
943
944static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
945{
946	int ringsize;
947
948	if (mdp->rx_ring) {
949		ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
950		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
951				  mdp->rx_desc_dma);
952		mdp->rx_ring = NULL;
953	}
954
955	if (mdp->tx_ring) {
956		ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
957		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
958				  mdp->tx_desc_dma);
959		mdp->tx_ring = NULL;
960	}
961}
962
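/* Reset the controller and program it for operation (descriptors, MAC, interrupts) */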
963static int sh_eth_dev_init(struct net_device *ndev)
964{
965	int ret = 0;
966	struct sh_eth_private *mdp = netdev_priv(ndev);
967	u32 val;
968
969	/* Soft Reset */
970	ret = sh_eth_reset(ndev);
971	if (ret)
972		goto out;
973
974	/* Descriptor format */
975	sh_eth_ring_format(ndev);
976	if (mdp->cd->rpadir)
977		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
978
979	/* all sh_eth int mask */
980	sh_eth_write(ndev, 0, EESIPR);
981
982#if defined(__LITTLE_ENDIAN)
983	if (mdp->cd->hw_swap)
984		sh_eth_write(ndev, EDMR_EL, EDMR);
985	else
986#endif
987		sh_eth_write(ndev, 0, EDMR);
988
989	/* FIFO size set */
990	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
991	sh_eth_write(ndev, 0, TFTR);
992
993	/* Frame recv control */
994	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
995
996	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
997
998	if (mdp->cd->bculr)
999		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
1000
1001	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1002
1003	if (!mdp->cd->no_trimd)
1004		sh_eth_write(ndev, 0, TRIMD);
1005
1006	/* Recv frame limit set register */
1007	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1008		     RFLR);
1009
1010	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1011	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1012
1013	/* PAUSE Prohibition */
1014	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1015		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1016
1017	sh_eth_write(ndev, val, ECMR);
1018
1019	if (mdp->cd->set_rate)
1020		mdp->cd->set_rate(ndev);
1021
1022	/* E-MAC Status Register clear */
1023	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1024
1025	/* E-MAC Interrupt Enable register */
1026	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1027
1028	/* Set MAC address */
1029	update_mac_address(ndev);
1030
1031	/* mask reset */
1032	if (mdp->cd->apr)
1033		sh_eth_write(ndev, APR_AP, APR);
1034	if (mdp->cd->mpr)
1035		sh_eth_write(ndev, MPR_MP, MPR);
1036	if (mdp->cd->tpauser)
1037		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1038
1039	/* Setting the Rx mode will start the Rx process. */
1040	sh_eth_write(ndev, EDRRR_R, EDRRR);
1041
1042	netif_start_queue(ndev);
1043
1044out:
1045	return ret;
1046}
1047
1048/* free Tx skb function */
1049static int sh_eth_txfree(struct net_device *ndev)
1050{
1051	struct sh_eth_private *mdp = netdev_priv(ndev);
1052	struct sh_eth_txdesc *txdesc;
1053	int free_num = 0;
1054	int entry = 0;
1055
1056	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1057		entry = mdp->dirty_tx % TX_RING_SIZE;
1058		txdesc = &mdp->tx_ring[entry];
1059		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1060			break;
1061		/* Free the original skb. */
1062		if (mdp->tx_skbuff[entry]) {
1063			dma_unmap_single(&ndev->dev, txdesc->addr,
1064					 txdesc->buffer_length, DMA_TO_DEVICE);
1065			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1066			mdp->tx_skbuff[entry] = NULL;
1067			free_num++;
1068		}
1069		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1070		if (entry >= TX_RING_SIZE - 1)
1071			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1072
1073		ndev->stats.tx_packets++;
1074		ndev->stats.tx_bytes += txdesc->buffer_length;
1075	}
1076	return free_num;
1077}
1078
1079/* Packet receive function */
1080static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
1081{
1082	struct sh_eth_private *mdp = netdev_priv(ndev);
1083	struct sh_eth_rxdesc *rxdesc;
1084
1085	int entry = mdp->cur_rx % RX_RING_SIZE;
1086	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
1087	struct sk_buff *skb;
1088	u16 pkt_len = 0;
1089	u32 desc_status;
1090
1091	rxdesc = &mdp->rx_ring[entry];
1092	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1093		desc_status = edmac_to_cpu(mdp, rxdesc->status);
1094		pkt_len = rxdesc->frame_length;
1095
1096#if defined(CONFIG_ARCH_R8A7740)
1097		desc_status >>= 16;
1098#endif
1099
1100		if (--boguscnt < 0)
1101			break;
1102
1103		if (!(desc_status & RDFEND))
1104			ndev->stats.rx_length_errors++;
1105
1106		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1107				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1108			ndev->stats.rx_errors++;
1109			if (desc_status & RD_RFS1)
1110				ndev->stats.rx_crc_errors++;
1111			if (desc_status & RD_RFS2)
1112				ndev->stats.rx_frame_errors++;
1113			if (desc_status & RD_RFS3)
1114				ndev->stats.rx_length_errors++;
1115			if (desc_status & RD_RFS4)
1116				ndev->stats.rx_length_errors++;
1117			if (desc_status & RD_RFS6)
1118				ndev->stats.rx_missed_errors++;
1119			if (desc_status & RD_RFS10)
1120				ndev->stats.rx_over_errors++;
1121		} else {
1122			if (!mdp->cd->hw_swap)
1123				sh_eth_soft_swap(
1124					phys_to_virt(ALIGN(rxdesc->addr, 4)),
1125					pkt_len + 2);
1126			skb = mdp->rx_skbuff[entry];
1127			mdp->rx_skbuff[entry] = NULL;
1128			if (mdp->cd->rpadir)
1129				skb_reserve(skb, NET_IP_ALIGN);
1130			skb_put(skb, pkt_len);
1131			skb->protocol = eth_type_trans(skb, ndev);
1132			netif_rx(skb);
1133			ndev->stats.rx_packets++;
1134			ndev->stats.rx_bytes += pkt_len;
1135		}
1136		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1137		entry = (++mdp->cur_rx) % RX_RING_SIZE;
1138		rxdesc = &mdp->rx_ring[entry];
1139	}
1140
1141	/* Refill the Rx ring buffers. */
1142	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1143		entry = mdp->dirty_rx % RX_RING_SIZE;
1144		rxdesc = &mdp->rx_ring[entry];
1145		/* The buffer length is aligned to a 16-byte boundary. */
1146		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1147
1148		if (mdp->rx_skbuff[entry] == NULL) {
1149			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1150			mdp->rx_skbuff[entry] = skb;
1151			if (skb == NULL)
1152				break;	/* Better luck next round. */
1153			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1154					DMA_FROM_DEVICE);
1155			sh_eth_set_receive_align(skb);
1156
1157			skb_checksum_none_assert(skb);
1158			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1159		}
1160		if (entry >= RX_RING_SIZE - 1)
1161			rxdesc->status |=
1162				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1163		else
1164			rxdesc->status |=
1165				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1166	}
1167
1168	/* Restart Rx engine if stopped. */
1169	/* If we don't need to check status, don't. -KDU */
1170	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1171		/* fix the values for the next receiving if RDE is set */
1172		if (intr_status & EESR_RDE)
1173			mdp->cur_rx = mdp->dirty_rx =
1174				(sh_eth_read(ndev, RDFAR) -
1175				 sh_eth_read(ndev, RDLAR)) >> 4;
1176		sh_eth_write(ndev, EDRRR_R, EDRRR);
1177	}
1178
1179	return 0;
1180}
1181
1182static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1183{
1184	/* disable tx and rx */
1185	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1186		~(ECMR_RE | ECMR_TE), ECMR);
1187}
1188
1189static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1190{
1191	/* enable tx and rx */
1192	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1193		(ECMR_RE | ECMR_TE), ECMR);
1194}
1195
1196/* error control function */
1197static void sh_eth_error(struct net_device *ndev, int intr_status)
1198{
1199	struct sh_eth_private *mdp = netdev_priv(ndev);
1200	u32 felic_stat;
1201	u32 link_stat;
1202	u32 mask;
1203
1204	if (intr_status & EESR_ECI) {
1205		felic_stat = sh_eth_read(ndev, ECSR);
1206		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1207		if (felic_stat & ECSR_ICD)
1208			ndev->stats.tx_carrier_errors++;
1209		if (felic_stat & ECSR_LCHNG) {
1210			/* Link Changed */
1211			if (mdp->cd->no_psr || mdp->no_ether_link) {
1212				if (mdp->link == PHY_DOWN)
1213					link_stat = 0;
1214				else
1215					link_stat = PHY_ST_LINK;
1216			} else {
1217				link_stat = (sh_eth_read(ndev, PSR));
1218				if (mdp->ether_link_active_low)
1219					link_stat = ~link_stat;
1220			}
1221			if (!(link_stat & PHY_ST_LINK))
1222				sh_eth_rcv_snd_disable(ndev);
1223			else {
1224				/* Link Up */
1225				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1226					  ~DMAC_M_ECI, EESIPR);
1227				/*clear int */
1228				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1229					  ECSR);
1230				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1231					  DMAC_M_ECI, EESIPR);
1232				/* enable tx and rx */
1233				sh_eth_rcv_snd_enable(ndev);
1234			}
1235		}
1236	}
1237
1238	if (intr_status & EESR_TWB) {
1239		/* Write back end. Unused write back interrupt */
1240		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
1241			ndev->stats.tx_aborted_errors++;
1242			if (netif_msg_tx_err(mdp))
1243				dev_err(&ndev->dev, "Transmit Abort\n");
		}
1244	}
1245
1246	if (intr_status & EESR_RABT) {
1247		/* Receive Abort int */
1248		if (intr_status & EESR_RFRMER) {
1249			/* Receive Frame Overflow int */
1250			ndev->stats.rx_frame_errors++;
1251			if (netif_msg_rx_err(mdp))
1252				dev_err(&ndev->dev, "Receive Abort\n");
1253		}
1254	}
1255
1256	if (intr_status & EESR_TDE) {
1257		/* Transmit Descriptor Empty int */
1258		ndev->stats.tx_fifo_errors++;
1259		if (netif_msg_tx_err(mdp))
1260			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1261	}
1262
1263	if (intr_status & EESR_TFE) {
1264		/* FIFO under flow */
1265		ndev->stats.tx_fifo_errors++;
1266		if (netif_msg_tx_err(mdp))
1267			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
1268	}
1269
1270	if (intr_status & EESR_RDE) {
1271		/* Receive Descriptor Empty int */
1272		ndev->stats.rx_over_errors++;
1273
1274		if (netif_msg_rx_err(mdp))
1275			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1276	}
1277
1278	if (intr_status & EESR_RFE) {
1279		/* Receive FIFO Overflow int */
1280		ndev->stats.rx_fifo_errors++;
1281		if (netif_msg_rx_err(mdp))
1282			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1283	}
1284
1285	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1286		/* Address Error */
1287		ndev->stats.tx_fifo_errors++;
1288		if (netif_msg_tx_err(mdp))
1289			dev_err(&ndev->dev, "Address Error\n");
1290	}
1291
1292	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1293	if (mdp->cd->no_ade)
1294		mask &= ~EESR_ADE;
1295	if (intr_status & mask) {
1296		/* Tx error */
1297		u32 edtrr = sh_eth_read(ndev, EDTRR);
1298		/* dmesg */
1299		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
1300				intr_status, mdp->cur_tx);
1301		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1302				mdp->dirty_tx, (u32) ndev->state, edtrr);
1303		/* dirty buffer free */
1304		sh_eth_txfree(ndev);
1305
1306		/* SH7712 BUG */
1307		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1308			/* tx dma start */
1309			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1310		}
1311		/* wakeup */
1312		netif_wake_queue(ndev);
1313	}
1314}
1315
1316static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1317{
1318	struct net_device *ndev = netdev;
1319	struct sh_eth_private *mdp = netdev_priv(ndev);
1320	struct sh_eth_cpu_data *cd = mdp->cd;
1321	irqreturn_t ret = IRQ_NONE;
1322	u32 intr_status = 0;
1323
1324	spin_lock(&mdp->lock);
1325
1326	/* Get interrupt status */
1327	intr_status = sh_eth_read(ndev, EESR);
1328	/* Clear interrupt */
1329	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
1330			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
1331			cd->tx_check | cd->eesr_err_check)) {
1332		sh_eth_write(ndev, intr_status, EESR);
1333		ret = IRQ_HANDLED;
1334	} else
1335		goto other_irq;
1336
1337	if (intr_status & (EESR_FRC | /* Frame recv*/
1338			EESR_RMAF | /* Multicast address recv */
1339			EESR_RRF  | /* Bit frame recv */
1340			EESR_RTLF | /* Long frame recv*/
1341			EESR_RTSF | /* short frame recv */
1342			EESR_PRE  | /* PHY-LSI recv error */
1343			EESR_CERF)){ /* recv frame CRC error */
1344		sh_eth_rx(ndev, intr_status);
1345	}
1346
1347	/* Tx Check */
1348	if (intr_status & cd->tx_check) {
1349		sh_eth_txfree(ndev);
1350		netif_wake_queue(ndev);
1351	}
1352
1353	if (intr_status & cd->eesr_err_check)
1354		sh_eth_error(ndev, intr_status);
1355
1356other_irq:
1357	spin_unlock(&mdp->lock);
1358
1359	return ret;
1360}
1361
1362/* PHY state control function */
1363static void sh_eth_adjust_link(struct net_device *ndev)
1364{
1365	struct sh_eth_private *mdp = netdev_priv(ndev);
1366	struct phy_device *phydev = mdp->phydev;
1367	int new_state = 0;
1368
1369	if (phydev->link != PHY_DOWN) {
1370		if (phydev->duplex != mdp->duplex) {
1371			new_state = 1;
1372			mdp->duplex = phydev->duplex;
1373			if (mdp->cd->set_duplex)
1374				mdp->cd->set_duplex(ndev);
1375		}
1376
1377		if (phydev->speed != mdp->speed) {
1378			new_state = 1;
1379			mdp->speed = phydev->speed;
1380			if (mdp->cd->set_rate)
1381				mdp->cd->set_rate(ndev);
1382		}
1383		if (mdp->link == PHY_DOWN) {
1384			sh_eth_write(ndev,
1385				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
1386			new_state = 1;
1387			mdp->link = phydev->link;
1388		}
1389	} else if (mdp->link) {
1390		new_state = 1;
1391		mdp->link = PHY_DOWN;
1392		mdp->speed = 0;
1393		mdp->duplex = -1;
1394	}
1395
1396	if (new_state && netif_msg_link(mdp))
1397		phy_print_status(phydev);
1398}
1399
1400/* PHY init function */
1401static int sh_eth_phy_init(struct net_device *ndev)
1402{
1403	struct sh_eth_private *mdp = netdev_priv(ndev);
1404	char phy_id[MII_BUS_ID_SIZE + 3];
1405	struct phy_device *phydev = NULL;
1406
1407	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1408		mdp->mii_bus->id , mdp->phy_id);
1409
1410	mdp->link = PHY_DOWN;
1411	mdp->speed = 0;
1412	mdp->duplex = -1;
1413
1414	/* Try connect to PHY */
1415	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1416				0, mdp->phy_interface);
1417	if (IS_ERR(phydev)) {
1418		dev_err(&ndev->dev, "phy_connect failed\n");
1419		return PTR_ERR(phydev);
1420	}
1421
1422	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
1423		phydev->addr, phydev->drv->name);
1424
1425	mdp->phydev = phydev;
1426
1427	return 0;
1428}
1429
1430/* PHY control start function */
1431static int sh_eth_phy_start(struct net_device *ndev)
1432{
1433	struct sh_eth_private *mdp = netdev_priv(ndev);
1434	int ret;
1435
1436	ret = sh_eth_phy_init(ndev);
1437	if (ret)
1438		return ret;
1439
1440	/* reset phy - this also wakes it from PDOWN */
1441	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
1442	phy_start(mdp->phydev);
1443
1444	return 0;
1445}
1446
1447static int sh_eth_get_settings(struct net_device *ndev,
1448			struct ethtool_cmd *ecmd)
1449{
1450	struct sh_eth_private *mdp = netdev_priv(ndev);
1451	unsigned long flags;
1452	int ret;
1453
1454	spin_lock_irqsave(&mdp->lock, flags);
1455	ret = phy_ethtool_gset(mdp->phydev, ecmd);
1456	spin_unlock_irqrestore(&mdp->lock, flags);
1457
1458	return ret;
1459}
1460
1461static int sh_eth_set_settings(struct net_device *ndev,
1462		struct ethtool_cmd *ecmd)
1463{
1464	struct sh_eth_private *mdp = netdev_priv(ndev);
1465	unsigned long flags;
1466	int ret;
1467
1468	spin_lock_irqsave(&mdp->lock, flags);
1469
1470	/* disable tx and rx */
1471	sh_eth_rcv_snd_disable(ndev);
1472
1473	ret = phy_ethtool_sset(mdp->phydev, ecmd);
1474	if (ret)
1475		goto error_exit;
1476
1477	if (ecmd->duplex == DUPLEX_FULL)
1478		mdp->duplex = 1;
1479	else
1480		mdp->duplex = 0;
1481
1482	if (mdp->cd->set_duplex)
1483		mdp->cd->set_duplex(ndev);
1484
1485error_exit:
1486	mdelay(1);
1487
1488	/* enable tx and rx */
1489	sh_eth_rcv_snd_enable(ndev);
1490
1491	spin_unlock_irqrestore(&mdp->lock, flags);
1492
1493	return ret;
1494}
1495
1496static int sh_eth_nway_reset(struct net_device *ndev)
1497{
1498	struct sh_eth_private *mdp = netdev_priv(ndev);
1499	unsigned long flags;
1500	int ret;
1501
1502	spin_lock_irqsave(&mdp->lock, flags);
1503	ret = phy_start_aneg(mdp->phydev);
1504	spin_unlock_irqrestore(&mdp->lock, flags);
1505
1506	return ret;
1507}
1508
1509static u32 sh_eth_get_msglevel(struct net_device *ndev)
1510{
1511	struct sh_eth_private *mdp = netdev_priv(ndev);
1512	return mdp->msg_enable;
1513}
1514
1515static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1516{
1517	struct sh_eth_private *mdp = netdev_priv(ndev);
1518	mdp->msg_enable = value;
1519}
1520
1521static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1522	"rx_current", "tx_current",
1523	"rx_dirty", "tx_dirty",
1524};
1525#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1526
1527static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1528{
1529	switch (sset) {
1530	case ETH_SS_STATS:
1531		return SH_ETH_STATS_LEN;
1532	default:
1533		return -EOPNOTSUPP;
1534	}
1535}
1536
1537static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1538			struct ethtool_stats *stats, u64 *data)
1539{
1540	struct sh_eth_private *mdp = netdev_priv(ndev);
1541	int i = 0;
1542
1543	/* device-specific stats */
1544	data[i++] = mdp->cur_rx;
1545	data[i++] = mdp->cur_tx;
1546	data[i++] = mdp->dirty_rx;
1547	data[i++] = mdp->dirty_tx;
1548}
1549
1550static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1551{
1552	switch (stringset) {
1553	case ETH_SS_STATS:
1554		memcpy(data, *sh_eth_gstrings_stats,
1555					sizeof(sh_eth_gstrings_stats));
1556		break;
1557	}
1558}
1559
1560static const struct ethtool_ops sh_eth_ethtool_ops = {
1561	.get_settings	= sh_eth_get_settings,
1562	.set_settings	= sh_eth_set_settings,
1563	.nway_reset	= sh_eth_nway_reset,
1564	.get_msglevel	= sh_eth_get_msglevel,
1565	.set_msglevel	= sh_eth_set_msglevel,
1566	.get_link	= ethtool_op_get_link,
1567	.get_strings	= sh_eth_get_strings,
1568	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
1569	.get_sset_count     = sh_eth_get_sset_count,
1570};
1571
1572/* network device open function */
1573static int sh_eth_open(struct net_device *ndev)
1574{
1575	int ret = 0;
1576	struct sh_eth_private *mdp = netdev_priv(ndev);
1577
1578	pm_runtime_get_sync(&mdp->pdev->dev);
1579
1580	ret = request_irq(ndev->irq, sh_eth_interrupt,
1581#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1582	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1583	defined(CONFIG_CPU_SUBTYPE_SH7757)
1584				IRQF_SHARED,
1585#else
1586				0,
1587#endif
1588				ndev->name, ndev);
1589	if (ret) {
1590		dev_err(&ndev->dev, "Can not assign IRQ number\n");
1591		return ret;
1592	}
1593
1594	/* Descriptor set */
1595	ret = sh_eth_ring_init(ndev);
1596	if (ret)
1597		goto out_free_irq;
1598
1599	/* device init */
1600	ret = sh_eth_dev_init(ndev);
1601	if (ret)
1602		goto out_free_irq;
1603
1604	/* PHY control start*/
1605	ret = sh_eth_phy_start(ndev);
1606	if (ret)
1607		goto out_free_irq;
1608
1609	return ret;
1610
1611out_free_irq:
1612	free_irq(ndev->irq, ndev);
1613	pm_runtime_put_sync(&mdp->pdev->dev);
1614	return ret;
1615}
1616
1617/* Timeout function */
1618static void sh_eth_tx_timeout(struct net_device *ndev)
1619{
1620	struct sh_eth_private *mdp = netdev_priv(ndev);
1621	struct sh_eth_rxdesc *rxdesc;
1622	int i;
1623
1624	netif_stop_queue(ndev);
1625
1626	if (netif_msg_timer(mdp))
1627		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1628	       " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1629
1630	/* tx_errors count up */
1631	ndev->stats.tx_errors++;
1632
1633	/* Free all the skbuffs in the Rx queue. */
1634	for (i = 0; i < RX_RING_SIZE; i++) {
1635		rxdesc = &mdp->rx_ring[i];
1636		rxdesc->status = 0;
1637		rxdesc->addr = 0xBADF00D0;
1638		if (mdp->rx_skbuff[i])
1639			dev_kfree_skb(mdp->rx_skbuff[i]);
1640		mdp->rx_skbuff[i] = NULL;
1641	}
1642	for (i = 0; i < TX_RING_SIZE; i++) {
1643		if (mdp->tx_skbuff[i])
1644			dev_kfree_skb(mdp->tx_skbuff[i]);
1645		mdp->tx_skbuff[i] = NULL;
1646	}
1647
1648	/* device init */
1649	sh_eth_dev_init(ndev);
1650}
1651
1652/* Packet transmit function */
1653static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1654{
1655	struct sh_eth_private *mdp = netdev_priv(ndev);
1656	struct sh_eth_txdesc *txdesc;
1657	u32 entry;
1658	unsigned long flags;
1659
1660	spin_lock_irqsave(&mdp->lock, flags);
1661	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1662		if (!sh_eth_txfree(ndev)) {
1663			if (netif_msg_tx_queued(mdp))
1664				dev_warn(&ndev->dev, "TxFD exhausted.\n");
1665			netif_stop_queue(ndev);
1666			spin_unlock_irqrestore(&mdp->lock, flags);
1667			return NETDEV_TX_BUSY;
1668		}
1669	}
1670	spin_unlock_irqrestore(&mdp->lock, flags);
1671
1672	entry = mdp->cur_tx % TX_RING_SIZE;
1673	mdp->tx_skbuff[entry] = skb;
1674	txdesc = &mdp->tx_ring[entry];
1675	/* soft swap. */
1676	if (!mdp->cd->hw_swap)
1677		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
1678				 skb->len + 2);
1679	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
1680				      DMA_TO_DEVICE);
1681	if (skb->len < ETHERSMALL)
1682		txdesc->buffer_length = ETHERSMALL;
1683	else
1684		txdesc->buffer_length = skb->len;
1685
1686	if (entry >= TX_RING_SIZE - 1)
1687		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
1688	else
1689		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
1690
1691	mdp->cur_tx++;
1692
1693	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
1694		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1695
1696	return NETDEV_TX_OK;
1697}
1698
1699/* device close function */
1700static int sh_eth_close(struct net_device *ndev)
1701{
1702	struct sh_eth_private *mdp = netdev_priv(ndev);
1703
1704	netif_stop_queue(ndev);
1705
1706	/* Disable interrupts by clearing the interrupt mask. */
1707	sh_eth_write(ndev, 0x0000, EESIPR);
1708
1709	/* Stop the chip's Tx and Rx processes. */
1710	sh_eth_write(ndev, 0, EDTRR);
1711	sh_eth_write(ndev, 0, EDRRR);
1712
1713	/* PHY Disconnect */
1714	if (mdp->phydev) {
1715		phy_stop(mdp->phydev);
1716		phy_disconnect(mdp->phydev);
1717	}
1718
1719	free_irq(ndev->irq, ndev);
1720
1721	/* Free all the skbuffs in the Rx queue. */
1722	sh_eth_ring_free(ndev);
1723
1724	/* free DMA buffer */
1725	sh_eth_free_dma_buffer(mdp);
1726
1727	pm_runtime_put_sync(&mdp->pdev->dev);
1728
1729	return 0;
1730}
1731
1732static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1733{
1734	struct sh_eth_private *mdp = netdev_priv(ndev);
1735
1736	pm_runtime_get_sync(&mdp->pdev->dev);
1737
1738	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1739	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
1740	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
1741	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
1742	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1743	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
1744	if (sh_eth_is_gether(mdp)) {
1745		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
1746		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
1747		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
1748		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
1749	} else {
1750		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1751		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
1752	}
1753	pm_runtime_put_sync(&mdp->pdev->dev);
1754
1755	return &ndev->stats;
1756}
1757
1758/* ioctl to device function */
1759static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1760				int cmd)
1761{
1762	struct sh_eth_private *mdp = netdev_priv(ndev);
1763	struct phy_device *phydev = mdp->phydev;
1764
1765	if (!netif_running(ndev))
1766		return -EINVAL;
1767
1768	if (!phydev)
1769		return -ENODEV;
1770
1771	return phy_mii_ioctl(phydev, rq, cmd);
1772}
1773
1774#if defined(SH_ETH_HAS_TSU)
1775/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
1776static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
1777					    int entry)
1778{
1779	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
1780}
1781
1782static u32 sh_eth_tsu_get_post_mask(int entry)
1783{
1784	return 0x0f << (28 - ((entry % 8) * 4));
1785}
1786
1787static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
1788{
1789	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
1790}
1791
1792static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
1793					     int entry)
1794{
1795	struct sh_eth_private *mdp = netdev_priv(ndev);
1796	u32 tmp;
1797	void *reg_offset;
1798
1799	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
1800	tmp = ioread32(reg_offset);
1801	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
1802}
1803
1804static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
1805					      int entry)
1806{
1807	struct sh_eth_private *mdp = netdev_priv(ndev);
1808	u32 post_mask, ref_mask, tmp;
1809	void *reg_offset;
1810
1811	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
1812	post_mask = sh_eth_tsu_get_post_mask(entry);
1813	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
1814
1815	tmp = ioread32(reg_offset);
1816	iowrite32(tmp & ~post_mask, reg_offset);
1817
1818	/* Return "true" if the other port still has this entry enabled */
1819	return tmp & ref_mask;
1820}
1821
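/* Wait for a pending TSU address-table access (ADSBSY) to complete */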
1822static int sh_eth_tsu_busy(struct net_device *ndev)
1823{
1824	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
1825	struct sh_eth_private *mdp = netdev_priv(ndev);
1826
1827	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
1828		udelay(10);
1829		timeout--;
1830		if (timeout <= 0) {
1831			dev_err(&ndev->dev, "%s: timeout\n", __func__);
1832			return -ETIMEDOUT;
1833		}
1834	}
1835
1836	return 0;
1837}
1838
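/* Write a MAC address into one TSU CAM entry (a pair of 32-bit registers) */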
1839static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
1840				  const u8 *addr)
1841{
1842	u32 val;
1843
1844	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
1845	iowrite32(val, reg);
1846	if (sh_eth_tsu_busy(ndev) < 0)
1847		return -EBUSY;
1848
1849	val = addr[4] << 8 | addr[5];
1850	iowrite32(val, reg + 4);
1851	if (sh_eth_tsu_busy(ndev) < 0)
1852		return -EBUSY;
1853
1854	return 0;
1855}
1856
1857static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
1858{
1859	u32 val;
1860
1861	val = ioread32(reg);
1862	addr[0] = (val >> 24) & 0xff;
1863	addr[1] = (val >> 16) & 0xff;
1864	addr[2] = (val >> 8) & 0xff;
1865	addr[3] = val & 0xff;
1866	val = ioread32(reg + 4);
1867	addr[4] = (val >> 8) & 0xff;
1868	addr[5] = val & 0xff;
1869}
1870
1871
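/* Look up a MAC address in the TSU CAM; return its index or -ENOENT */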
1872static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
1873{
1874	struct sh_eth_private *mdp = netdev_priv(ndev);
1875	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1876	int i;
1877	u8 c_addr[ETH_ALEN];
1878
1879	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
1880		sh_eth_tsu_read_entry(reg_offset, c_addr);
1881		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
1882			return i;
1883	}
1884
1885	return -ENOENT;
1886}
1887
1888static int sh_eth_tsu_find_empty(struct net_device *ndev)
1889{
1890	u8 blank[ETH_ALEN];
1891	int entry;
1892
1893	memset(blank, 0, sizeof(blank));
1894	entry = sh_eth_tsu_find_entry(ndev, blank);
1895	return (entry < 0) ? -ENOMEM : entry;
1896}
1897
1898static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
1899					      int entry)
1900{
1901	struct sh_eth_private *mdp = netdev_priv(ndev);
1902	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1903	int ret;
1904	u8 blank[ETH_ALEN];
1905
1906	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
1907			 ~(1 << (31 - entry)), TSU_TEN);
1908
1909	memset(blank, 0, sizeof(blank));
1910	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
1911	if (ret < 0)
1912		return ret;
1913	return 0;
1914}
1915
1916static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
1917{
1918	struct sh_eth_private *mdp = netdev_priv(ndev);
1919	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1920	int i, ret;
1921
1922	if (!mdp->cd->tsu)
1923		return 0;
1924
1925	i = sh_eth_tsu_find_entry(ndev, addr);
1926	if (i < 0) {
1927		/* No entry found, create one */
1928		i = sh_eth_tsu_find_empty(ndev);
1929		if (i < 0)
1930			return -ENOMEM;
1931		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
1932		if (ret < 0)
1933			return ret;
1934
1935		/* Enable the entry */
1936		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
1937				 (1 << (31 - i)), TSU_TEN);
1938	}
1939
1940	/* Entry found or created, enable POST */
1941	sh_eth_tsu_enable_cam_entry_post(ndev, i);
1942
1943	return 0;
1944}
1945
1946static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
1947{
1948	struct sh_eth_private *mdp = netdev_priv(ndev);
1949	int i, ret;
1950
1951	if (!mdp->cd->tsu)
1952		return 0;
1953
1954	i = sh_eth_tsu_find_entry(ndev, addr);
1955	if (i >= 0) {
1956		/* Entry found */
1957		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
1958			goto done;
1959
1960		/* Disable the entry if both ports have disabled it */
1961		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
1962		if (ret < 0)
1963			return ret;
1964	}
1965done:
1966	return 0;
1967}
1968
1969static int sh_eth_tsu_purge_all(struct net_device *ndev)
1970{
1971	struct sh_eth_private *mdp = netdev_priv(ndev);
1972	int i, ret;
1973
1974	if (unlikely(!mdp->cd->tsu))
1975		return 0;
1976
1977	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
1978		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
1979			continue;
1980
1981		/* Disable the entry if both ports have disabled it */
1982		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
1983		if (ret < 0)
1984			return ret;
1985	}
1986
1987	return 0;
1988}
1989
1990static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
1991{
1992	struct sh_eth_private *mdp = netdev_priv(ndev);
1993	u8 addr[ETH_ALEN];
1994	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1995	int i;
1996
1997	if (unlikely(!mdp->cd->tsu))
1998		return;
1999
2000	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2001		sh_eth_tsu_read_entry(reg_offset, addr);
2002		if (is_multicast_ether_addr(addr))
2003			sh_eth_tsu_del_entry(ndev, addr);
2004	}
2005}
2006
2007/* Multicast reception directions set */
2008static void sh_eth_set_multicast_list(struct net_device *ndev)
2009{
2010	struct sh_eth_private *mdp = netdev_priv(ndev);
2011	u32 ecmr_bits;
2012	int mcast_all = 0;
2013	unsigned long flags;
2014
2015	spin_lock_irqsave(&mdp->lock, flags);
2016	/*
2017	 * Initial condition is MCT = 1, PRM = 0.
2018	 * Depending on ndev->flags, set PRM or clear MCT
2019	 */
2020	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2021
2022	if (!(ndev->flags & IFF_MULTICAST)) {
2023		sh_eth_tsu_purge_mcast(ndev);
2024		mcast_all = 1;
2025	}
2026	if (ndev->flags & IFF_ALLMULTI) {
2027		sh_eth_tsu_purge_mcast(ndev);
2028		ecmr_bits &= ~ECMR_MCT;
2029		mcast_all = 1;
2030	}
2031
2032	if (ndev->flags & IFF_PROMISC) {
2033		sh_eth_tsu_purge_all(ndev);
2034		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2035	} else if (mdp->cd->tsu) {
2036		struct netdev_hw_addr *ha;
2037		netdev_for_each_mc_addr(ha, ndev) {
2038			if (mcast_all && is_multicast_ether_addr(ha->addr))
2039				continue;
2040
2041			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2042				if (!mcast_all) {
2043					sh_eth_tsu_purge_mcast(ndev);
2044					ecmr_bits &= ~ECMR_MCT;
2045					mcast_all = 1;
2046				}
2047			}
2048		}
2049	} else {
2050		/* Normal, unicast/broadcast-only mode. */
2051		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2052	}
2053
2054	/* update the ethernet mode */
2055	sh_eth_write(ndev, ecmr_bits, ECMR);
2056
2057	spin_unlock_irqrestore(&mdp->lock, flags);
2058}
2059
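/*
 * Each port has its own VLAN tag filter register: TSU_VTAG0 for port 0
 * and TSU_VTAG1 for port 1.
 */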
2060static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2061{
2062	if (!mdp->port)
2063		return TSU_VTAG0;
2064	else
2065		return TSU_VTAG1;
2066}
2067
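/*
 * ndo_vlan_rx_add_vid: program the single HW VLAN filter with @vid, or
 * disable the filter once more than one VLAN ID is in use.
 */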
2068static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2069{
2070	struct sh_eth_private *mdp = netdev_priv(ndev);
2071	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2072
2073	if (unlikely(!mdp->cd->tsu))
2074		return -EPERM;
2075
2076	/* No filtering if vid = 0 */
2077	if (!vid)
2078		return 0;
2079
2080	mdp->vlan_num_ids++;
2081
2082	/*
	 * The controller has only one VLAN tag HW filter. So, if the filter
	 * is already enabled (more than one VLAN ID registered), the driver
	 * disables it and no HW VLAN filtering is performed.
2085	 */
2086	if (mdp->vlan_num_ids > 1) {
2087		/* disable VLAN filter */
2088		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2089		return 0;
2090	}
2091
2092	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2093			 vtag_reg_index);
2094
2095	return 0;
2096}
2097
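/* ndo_vlan_rx_kill_vid: forget @vid and clear the HW VLAN filter register */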
2098static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2099{
2100	struct sh_eth_private *mdp = netdev_priv(ndev);
2101	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2102
2103	if (unlikely(!mdp->cd->tsu))
2104		return -EPERM;
2105
2106	/* No filtering if vid = 0 */
2107	if (!vid)
2108		return 0;
2109
2110	mdp->vlan_num_ids--;
2111	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2112
2113	return 0;
2114}
2115#endif /* SH_ETH_HAS_TSU */
2116
2117/* SuperH's TSU register init function */
2118static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2119{
2120	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
2121	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
2122	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
2123	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2124	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2125	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2126	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2127	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2128	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2129	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2130	if (sh_eth_is_gether(mdp)) {
2131		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
2132		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
2133	} else {
2134		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
2135		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
2136	}
2137	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupts */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entries */
2140	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
2141	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
2142	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
2143	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
2144}
2145
2146/* MDIO bus release function */
2147static int sh_mdio_release(struct net_device *ndev)
2148{
2149	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2150
2151	/* unregister mdio bus */
2152	mdiobus_unregister(bus);
2153
2154	/* remove mdio bus info from net_device */
2155	dev_set_drvdata(&ndev->dev, NULL);
2156
	/* free the PHY IRQ table */
2158	kfree(bus->irq);
2159
2160	/* free bitbang info */
2161	free_mdio_bitbang(bus);
2162
2163	return 0;
2164}
2165
2166/* MDIO bus init function */
2167static int sh_mdio_init(struct net_device *ndev, int id,
2168			struct sh_eth_plat_data *pd)
2169{
2170	int ret, i;
2171	struct bb_info *bitbang;
2172	struct sh_eth_private *mdp = netdev_priv(ndev);
2173
2174	/* create bit control struct for PHY */
2175	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
2176	if (!bitbang) {
2177		ret = -ENOMEM;
2178		goto out;
2179	}
2180
2181	/* bitbang init */
2182	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2183	bitbang->set_gate = pd->set_mdio_gate;
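	/*
	 * PIR bit masks for bit-banged MDIO: MDI is bit 3, MDO is bit 2,
	 * MMD is bit 1 and MDC is bit 0.
	 */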
2184	bitbang->mdi_msk = 0x08;
	bitbang->mmd_msk = 0x02;	/* MMD */
2186	bitbang->mmd_msk = 0x02;/* MMD */
2187	bitbang->mdc_msk = 0x01;
2188	bitbang->ctrl.ops = &bb_ops;
2189
2190	/* MII controller setting */
2191	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2192	if (!mdp->mii_bus) {
2193		ret = -ENOMEM;
2194		goto out_free_bitbang;
2195	}
2196
2197	/* Hook up MII support for ethtool */
2198	mdp->mii_bus->name = "sh_mii";
2199	mdp->mii_bus->parent = &ndev->dev;
2200	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2201		mdp->pdev->name, id);
2202
2203	/* PHY IRQ */
2204	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
2205	if (!mdp->mii_bus->irq) {
2206		ret = -ENOMEM;
2207		goto out_free_bus;
2208	}
2209
2210	for (i = 0; i < PHY_MAX_ADDR; i++)
2211		mdp->mii_bus->irq[i] = PHY_POLL;
2212
	/* register the MDIO bus */
2214	ret = mdiobus_register(mdp->mii_bus);
2215	if (ret)
2216		goto out_free_irq;
2217
2218	dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2219
2220	return 0;
2221
2222out_free_irq:
2223	kfree(mdp->mii_bus->irq);
2224
2225out_free_bus:
2226	free_mdio_bitbang(mdp->mii_bus);
2227
2228out_free_bitbang:
2229	kfree(bitbang);
2230
2231out:
2232	return ret;
2233}
2234
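/* Map the platform-specified register layout to its register offset table */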
2235static const u16 *sh_eth_get_register_offset(int register_type)
2236{
2237	const u16 *reg_offset = NULL;
2238
2239	switch (register_type) {
2240	case SH_ETH_REG_GIGABIT:
2241		reg_offset = sh_eth_offset_gigabit;
2242		break;
2243	case SH_ETH_REG_FAST_SH4:
2244		reg_offset = sh_eth_offset_fast_sh4;
2245		break;
2246	case SH_ETH_REG_FAST_SH3_SH2:
2247		reg_offset = sh_eth_offset_fast_sh3_sh2;
2248		break;
2249	default:
2250		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
2251		break;
2252	}
2253
2254	return reg_offset;
2255}
2256
2257static const struct net_device_ops sh_eth_netdev_ops = {
2258	.ndo_open		= sh_eth_open,
2259	.ndo_stop		= sh_eth_close,
2260	.ndo_start_xmit		= sh_eth_start_xmit,
2261	.ndo_get_stats		= sh_eth_get_stats,
2262#if defined(SH_ETH_HAS_TSU)
2263	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
2264	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
2265	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
2266#endif
2267	.ndo_tx_timeout		= sh_eth_tx_timeout,
2268	.ndo_do_ioctl		= sh_eth_do_ioctl,
2269	.ndo_validate_addr	= eth_validate_addr,
2270	.ndo_set_mac_address	= eth_mac_addr,
2271	.ndo_change_mtu		= eth_change_mtu,
2272};
2273
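/*
 * Platform probe: map the MAC (and optional TSU) registers, initialize
 * the net_device, register it and bring up the MDIO bus.
 */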
2274static int sh_eth_drv_probe(struct platform_device *pdev)
2275{
2276	int ret, devno = 0;
2277	struct resource *res;
2278	struct net_device *ndev = NULL;
2279	struct sh_eth_private *mdp = NULL;
2280	struct sh_eth_plat_data *pd;
2281
2282	/* get base addr */
2283	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2284	if (unlikely(res == NULL)) {
2285		dev_err(&pdev->dev, "invalid resource\n");
2286		ret = -EINVAL;
2287		goto out;
2288	}
2289
2290	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2291	if (!ndev) {
2292		ret = -ENOMEM;
2293		goto out;
2294	}
2295
2296	/* The sh Ether-specific entries in the device structure. */
2297	ndev->base_addr = res->start;
2298	devno = pdev->id;
2299	if (devno < 0)
2300		devno = 0;
2301
2302	ndev->dma = -1;
2303	ret = platform_get_irq(pdev, 0);
2304	if (ret < 0) {
2305		ret = -ENODEV;
2306		goto out_release;
2307	}
2308	ndev->irq = ret;
2309
2310	SET_NETDEV_DEV(ndev, &pdev->dev);
2311
2312	/* Fill in the fields of the device structure with ethernet values. */
2313	ether_setup(ndev);
2314
2315	mdp = netdev_priv(ndev);
2316	mdp->addr = ioremap(res->start, resource_size(res));
2317	if (mdp->addr == NULL) {
2318		ret = -ENOMEM;
2319		dev_err(&pdev->dev, "ioremap failed.\n");
2320		goto out_release;
2321	}
2322
2323	spin_lock_init(&mdp->lock);
2324	mdp->pdev = pdev;
2325	pm_runtime_enable(&pdev->dev);
2326	pm_runtime_resume(&pdev->dev);
2327
2328	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
2329	/* get PHY ID */
2330	mdp->phy_id = pd->phy;
2331	mdp->phy_interface = pd->phy_interface;
2332	/* EDMAC endian */
2333	mdp->edmac_endian = pd->edmac_endian;
2334	mdp->no_ether_link = pd->no_ether_link;
2335	mdp->ether_link_active_low = pd->ether_link_active_low;
2336	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
2337
2338	/* set cpu data */
2339#if defined(SH_ETH_HAS_BOTH_MODULES)
2340	mdp->cd = sh_eth_get_cpu_data(mdp);
2341#else
2342	mdp->cd = &sh_eth_my_cpu_data;
2343#endif
2344	sh_eth_set_default_cpu_data(mdp->cd);
2345
	/* set netdev and ethtool operations */
2347	ndev->netdev_ops = &sh_eth_netdev_ops;
2348	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2349	ndev->watchdog_timeo = TX_TIMEOUT;
2350
2351	/* debug message level */
2352	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2353
2354	/* read and set MAC address */
2355	read_mac_address(ndev, pd->mac_addr);
2356
2357	/* ioremap the TSU registers */
2358	if (mdp->cd->tsu) {
2359		struct resource *rtsu;
2360		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "TSU resource not found\n");
			ret = -ENODEV;
			goto out_release;
		}
		mdp->tsu_addr = ioremap(rtsu->start,
					resource_size(rtsu));
		if (mdp->tsu_addr == NULL) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "TSU ioremap failed.\n");
			goto out_release;
		}
2367		mdp->port = devno % 2;
2368		ndev->features = NETIF_F_HW_VLAN_FILTER;
2369	}
2370
	/* initialize the first device, or any device flagged pd->needs_init */
2372	if (!devno || pd->needs_init) {
2373		if (mdp->cd->chip_reset)
2374			mdp->cd->chip_reset(ndev);
2375
2376		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
2378			sh_eth_tsu_init(mdp);
2379		}
2380	}
2381
2382	/* network device register */
2383	ret = register_netdev(ndev);
2384	if (ret)
2385		goto out_release;
2386
2387	/* mdio bus init */
2388	ret = sh_mdio_init(ndev, pdev->id, pd);
2389	if (ret)
2390		goto out_unregister;
2391
2392	/* print device information */
2393	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
2394	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2395
2396	platform_set_drvdata(pdev, ndev);
2397
2398	return ret;
2399
2400out_unregister:
2401	unregister_netdev(ndev);
2402
2403out_release:
2404	/* net_dev free */
2405	if (mdp && mdp->addr)
2406		iounmap(mdp->addr);
2407	if (mdp && mdp->tsu_addr)
2408		iounmap(mdp->tsu_addr);
2409	if (ndev)
2410		free_netdev(ndev);
2411
2412out:
2413	return ret;
2414}
2415
2416static int sh_eth_drv_remove(struct platform_device *pdev)
2417{
2418	struct net_device *ndev = platform_get_drvdata(pdev);
2419	struct sh_eth_private *mdp = netdev_priv(ndev);
2420
2421	if (mdp->cd->tsu)
2422		iounmap(mdp->tsu_addr);
2423	sh_mdio_release(ndev);
2424	unregister_netdev(ndev);
2425	pm_runtime_disable(&pdev->dev);
2426	iounmap(mdp->addr);
2427	free_netdev(ndev);
2428	platform_set_drvdata(pdev, NULL);
2429
2430	return 0;
2431}
2432
2433static int sh_eth_runtime_nop(struct device *dev)
2434{
2435	/*
2436	 * Runtime PM callback shared between ->runtime_suspend()
2437	 * and ->runtime_resume(). Simply returns success.
2438	 *
2439	 * This driver re-initializes all registers after
2440	 * pm_runtime_get_sync() anyway so there is no need
2441	 * to save and restore registers here.
2442	 */
2443	return 0;
2444}
2445
2446static struct dev_pm_ops sh_eth_dev_pm_ops = {
2447	.runtime_suspend = sh_eth_runtime_nop,
2448	.runtime_resume = sh_eth_runtime_nop,
2449};
2450
2451static struct platform_driver sh_eth_driver = {
2452	.probe = sh_eth_drv_probe,
2453	.remove = sh_eth_drv_remove,
2454	.driver = {
2455		   .name = CARDNAME,
2456		   .pm = &sh_eth_dev_pm_ops,
2457	},
2458};
2459
2460module_platform_driver(sh_eth_driver);
2461
2462MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2463MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2464MODULE_LICENSE("GPL v2");
2465