/* sh_eth.c @ revision 71cc7c37af71b497698f7f8a68e46a458071fcef */
1/*
2 *  SuperH Ethernet device driver
3 *
4 *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 *  Copyright (C) 2008-2009 Renesas Solutions Corp.
6 *
7 *  This program is free software; you can redistribute it and/or modify it
8 *  under the terms and conditions of the GNU General Public License,
9 *  version 2, as published by the Free Software Foundation.
10 *
11 *  This program is distributed in the hope it will be useful, but WITHOUT
12 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14 *  more details.
15 *  You should have received a copy of the GNU General Public License along with
16 *  this program; if not, write to the Free Software Foundation, Inc.,
17 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 *  The full GNU General Public License is included in this distribution in
20 *  the file called "COPYING".
21 */
22
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
27#include <linux/interrupt.h>
28#include <linux/dma-mapping.h>
29#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/platform_device.h>
32#include <linux/mdio-bitbang.h>
33#include <linux/netdevice.h>
34#include <linux/phy.h>
35#include <linux/cache.h>
36#include <linux/io.h>
37#include <linux/pm_runtime.h>
38#include <linux/slab.h>
39#include <linux/ethtool.h>
40#include <linux/if_vlan.h>
41#include <linux/sh_eth.h>
42
43#include "sh_eth.h"
44
45#define SH_ETH_DEF_MSG_ENABLE \
46		(NETIF_MSG_LINK	| \
47		NETIF_MSG_TIMER	| \
48		NETIF_MSG_RX_ERR| \
49		NETIF_MSG_TX_ERR)
50
51/* There is CPU dependent code */
52#if defined(CONFIG_CPU_SUBTYPE_SH7724)
53#define SH_ETH_RESET_DEFAULT	1
54static void sh_eth_set_duplex(struct net_device *ndev)
55{
56	struct sh_eth_private *mdp = netdev_priv(ndev);
57
58	if (mdp->duplex) /* Full */
59		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
60	else		/* Half */
61		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
62}
63
64static void sh_eth_set_rate(struct net_device *ndev)
65{
66	struct sh_eth_private *mdp = netdev_priv(ndev);
67
68	switch (mdp->speed) {
69	case 10: /* 10BASE */
70		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
71		break;
72	case 100:/* 100BASE */
73		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
74		break;
75	default:
76		break;
77	}
78}
79
80/* SH7724 */
81static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
82	.set_duplex	= sh_eth_set_duplex,
83	.set_rate	= sh_eth_set_rate,
84
85	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
86	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
87	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
88
89	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
90	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
91			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
92	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
93
94	.apr		= 1,
95	.mpr		= 1,
96	.tpauser	= 1,
97	.hw_swap	= 1,
98	.rpadir		= 1,
99	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
100};
101#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
102#define SH_ETH_HAS_BOTH_MODULES	1
103#define SH_ETH_HAS_TSU	1
104static void sh_eth_set_duplex(struct net_device *ndev)
105{
106	struct sh_eth_private *mdp = netdev_priv(ndev);
107
108	if (mdp->duplex) /* Full */
109		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
110	else		/* Half */
111		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
112}
113
114static void sh_eth_set_rate(struct net_device *ndev)
115{
116	struct sh_eth_private *mdp = netdev_priv(ndev);
117
118	switch (mdp->speed) {
119	case 10: /* 10BASE */
120		sh_eth_write(ndev, 0, RTRATE);
121		break;
122	case 100:/* 100BASE */
123		sh_eth_write(ndev, 1, RTRATE);
124		break;
125	default:
126		break;
127	}
128}
129
130/* SH7757 */
131static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
132	.set_duplex		= sh_eth_set_duplex,
133	.set_rate		= sh_eth_set_rate,
134
135	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
136	.rmcr_value	= 0x00000001,
137
138	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
139	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
140			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
141	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
142
143	.apr		= 1,
144	.mpr		= 1,
145	.tpauser	= 1,
146	.hw_swap	= 1,
147	.no_ade		= 1,
148	.rpadir		= 1,
149	.rpadir_value   = 2 << 16,
150};
151
152#define SH_GIGA_ETH_BASE	0xfee00000
153#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
154#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
155static void sh_eth_chip_reset_giga(struct net_device *ndev)
156{
157	int i;
158	unsigned long mahr[2], malr[2];
159
160	/* save MAHR and MALR */
161	for (i = 0; i < 2; i++) {
162		malr[i] = ioread32((void *)GIGA_MALR(i));
163		mahr[i] = ioread32((void *)GIGA_MAHR(i));
164	}
165
166	/* reset device */
167	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
168	mdelay(1);
169
170	/* restore MAHR and MALR */
171	for (i = 0; i < 2; i++) {
172		iowrite32(malr[i], (void *)GIGA_MALR(i));
173		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
174	}
175}
176
177static int sh_eth_is_gether(struct sh_eth_private *mdp);
178static void sh_eth_reset(struct net_device *ndev)
179{
180	struct sh_eth_private *mdp = netdev_priv(ndev);
181	int cnt = 100;
182
183	if (sh_eth_is_gether(mdp)) {
184		sh_eth_write(ndev, 0x03, EDSR);
185		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
186				EDMR);
187		while (cnt > 0) {
188			if (!(sh_eth_read(ndev, EDMR) & 0x3))
189				break;
190			mdelay(1);
191			cnt--;
192		}
193		if (cnt == 0)
194			printk(KERN_ERR "Device reset failed\n");
195
196		/* Table Init */
197		sh_eth_write(ndev, 0x0, TDLAR);
198		sh_eth_write(ndev, 0x0, TDFAR);
199		sh_eth_write(ndev, 0x0, TDFXR);
200		sh_eth_write(ndev, 0x0, TDFFR);
201		sh_eth_write(ndev, 0x0, RDLAR);
202		sh_eth_write(ndev, 0x0, RDFAR);
203		sh_eth_write(ndev, 0x0, RDFXR);
204		sh_eth_write(ndev, 0x0, RDFFR);
205	} else {
206		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
207				EDMR);
208		mdelay(3);
209		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
210				EDMR);
211	}
212}
213
214static void sh_eth_set_duplex_giga(struct net_device *ndev)
215{
216	struct sh_eth_private *mdp = netdev_priv(ndev);
217
218	if (mdp->duplex) /* Full */
219		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
220	else		/* Half */
221		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
222}
223
224static void sh_eth_set_rate_giga(struct net_device *ndev)
225{
226	struct sh_eth_private *mdp = netdev_priv(ndev);
227
228	switch (mdp->speed) {
229	case 10: /* 10BASE */
230		sh_eth_write(ndev, 0x00000000, GECMR);
231		break;
232	case 100:/* 100BASE */
233		sh_eth_write(ndev, 0x00000010, GECMR);
234		break;
235	case 1000: /* 1000BASE */
236		sh_eth_write(ndev, 0x00000020, GECMR);
237		break;
238	default:
239		break;
240	}
241}
242
243/* SH7757(GETHERC) */
244static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
245	.chip_reset	= sh_eth_chip_reset_giga,
246	.set_duplex	= sh_eth_set_duplex_giga,
247	.set_rate	= sh_eth_set_rate_giga,
248
249	.ecsr_value	= ECSR_ICD | ECSR_MPD,
250	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
251	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
252
253	.tx_check	= EESR_TC1 | EESR_FTC,
254	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
255			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
256			  EESR_ECI,
257	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
258			  EESR_TFE,
259	.fdr_value	= 0x0000072f,
260	.rmcr_value	= 0x00000001,
261
262	.apr		= 1,
263	.mpr		= 1,
264	.tpauser	= 1,
265	.bculr		= 1,
266	.hw_swap	= 1,
267	.rpadir		= 1,
268	.rpadir_value   = 2 << 16,
269	.no_trimd	= 1,
270	.no_ade		= 1,
271	.tsu		= 1,
272};
273
274static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
275{
276	if (sh_eth_is_gether(mdp))
277		return &sh_eth_my_cpu_data_giga;
278	else
279		return &sh_eth_my_cpu_data;
280}
281
282#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
283#define SH_ETH_HAS_TSU	1
284static void sh_eth_chip_reset(struct net_device *ndev)
285{
286	struct sh_eth_private *mdp = netdev_priv(ndev);
287
288	/* reset device */
289	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
290	mdelay(1);
291}
292
293static void sh_eth_reset(struct net_device *ndev)
294{
295	int cnt = 100;
296
297	sh_eth_write(ndev, EDSR_ENALL, EDSR);
298	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
299	while (cnt > 0) {
300		if (!(sh_eth_read(ndev, EDMR) & 0x3))
301			break;
302		mdelay(1);
303		cnt--;
304	}
305	if (cnt == 0)
306		printk(KERN_ERR "Device reset failed\n");
307
308	/* Table Init */
309	sh_eth_write(ndev, 0x0, TDLAR);
310	sh_eth_write(ndev, 0x0, TDFAR);
311	sh_eth_write(ndev, 0x0, TDFXR);
312	sh_eth_write(ndev, 0x0, TDFFR);
313	sh_eth_write(ndev, 0x0, RDLAR);
314	sh_eth_write(ndev, 0x0, RDFAR);
315	sh_eth_write(ndev, 0x0, RDFXR);
316	sh_eth_write(ndev, 0x0, RDFFR);
317}
318
319static void sh_eth_set_duplex(struct net_device *ndev)
320{
321	struct sh_eth_private *mdp = netdev_priv(ndev);
322
323	if (mdp->duplex) /* Full */
324		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
325	else		/* Half */
326		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
327}
328
329static void sh_eth_set_rate(struct net_device *ndev)
330{
331	struct sh_eth_private *mdp = netdev_priv(ndev);
332
333	switch (mdp->speed) {
334	case 10: /* 10BASE */
335		sh_eth_write(ndev, GECMR_10, GECMR);
336		break;
337	case 100:/* 100BASE */
338		sh_eth_write(ndev, GECMR_100, GECMR);
339		break;
340	case 1000: /* 1000BASE */
341		sh_eth_write(ndev, GECMR_1000, GECMR);
342		break;
343	default:
344		break;
345	}
346}
347
348/* sh7763 */
349static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
350	.chip_reset	= sh_eth_chip_reset,
351	.set_duplex	= sh_eth_set_duplex,
352	.set_rate	= sh_eth_set_rate,
353
354	.ecsr_value	= ECSR_ICD | ECSR_MPD,
355	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
356	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
357
358	.tx_check	= EESR_TC1 | EESR_FTC,
359	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
360			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
361			  EESR_ECI,
362	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
363			  EESR_TFE,
364
365	.apr		= 1,
366	.mpr		= 1,
367	.tpauser	= 1,
368	.bculr		= 1,
369	.hw_swap	= 1,
370	.no_trimd	= 1,
371	.no_ade		= 1,
372	.tsu		= 1,
373};
374
375#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
376#define SH_ETH_RESET_DEFAULT	1
377static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
378	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
379
380	.apr		= 1,
381	.mpr		= 1,
382	.tpauser	= 1,
383	.hw_swap	= 1,
384};
385#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
386#define SH_ETH_RESET_DEFAULT	1
387#define SH_ETH_HAS_TSU	1
388static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
389	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
390	.tsu		= 1,
391};
392#endif
393
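/* Fill in any cpu_data fields that the CPU-specific table left at zero with default values */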
394static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
395{
396	if (!cd->ecsr_value)
397		cd->ecsr_value = DEFAULT_ECSR_INIT;
398
399	if (!cd->ecsipr_value)
400		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
401
402	if (!cd->fcftr_value)
403		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
404				  DEFAULT_FIFO_F_D_RFD;
405
406	if (!cd->fdr_value)
407		cd->fdr_value = DEFAULT_FDR_INIT;
408
409	if (!cd->rmcr_value)
410		cd->rmcr_value = DEFAULT_RMCR_VALUE;
411
412	if (!cd->tx_check)
413		cd->tx_check = DEFAULT_TX_CHECK;
414
415	if (!cd->eesr_err_check)
416		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
417
418	if (!cd->tx_error_check)
419		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
420}
421
422#if defined(SH_ETH_RESET_DEFAULT)
423/* Chip Reset */
424static void sh_eth_reset(struct net_device *ndev)
425{
426	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
427	mdelay(3);
428	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
429}
430#endif
431
432#if defined(CONFIG_CPU_SH4)
433static void sh_eth_set_receive_align(struct sk_buff *skb)
434{
435	int reserve;
436
437	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
438	if (reserve)
439		skb_reserve(skb, reserve);
440}
441#else
442static void sh_eth_set_receive_align(struct sk_buff *skb)
443{
444	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
445}
446#endif
447
448
449/* CPU <-> EDMAC endian convert */
450static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
451{
452	switch (mdp->edmac_endian) {
453	case EDMAC_LITTLE_ENDIAN:
454		return cpu_to_le32(x);
455	case EDMAC_BIG_ENDIAN:
456		return cpu_to_be32(x);
457	}
458	return x;
459}
460
461static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
462{
463	switch (mdp->edmac_endian) {
464	case EDMAC_LITTLE_ENDIAN:
465		return le32_to_cpu(x);
466	case EDMAC_BIG_ENDIAN:
467		return be32_to_cpu(x);
468	}
469	return x;
470}
471
472/*
473 * Program the hardware MAC address from dev->dev_addr.
474 */
475static void update_mac_address(struct net_device *ndev)
476{
477	sh_eth_write(ndev,
478		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
479		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
480	sh_eth_write(ndev,
481		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
482}
483
484/*
485 * Get the MAC address from the SuperH MAC address registers
486 *
487 * The SuperH Ethernet controller has no ROM for the MAC address.
488 * This driver reads back the MAC address programmed by the bootloader
489 * (U-Boot or sh-ipl+g), so the bootloader must set the MAC address
490 * before this device can be used.
491 */
492static void read_mac_address(struct net_device *ndev, unsigned char *mac)
493{
494	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
495		memcpy(ndev->dev_addr, mac, 6);
496	} else {
497		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
498		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
499		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
500		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
501		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
502		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
503	}
504}
505
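/* A GETHER (gigabit) core is identified by its gigabit register layout */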
506static int sh_eth_is_gether(struct sh_eth_private *mdp)
507{
508	if (mdp->reg_offset == sh_eth_offset_gigabit)
509		return 1;
510	else
511		return 0;
512}
513
514static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
515{
516	if (sh_eth_is_gether(mdp))
517		return EDTRR_TRNS_GETHER;
518	else
519		return EDTRR_TRNS_ETHER;
520}
521
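/* Bit-banged MDIO bus state; the msk fields select the MDC, MDIO data and direction bits within the PIR register */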
522struct bb_info {
523	void (*set_gate)(void *addr);
524	struct mdiobb_ctrl ctrl;
525	void *addr;
526	u32 mmd_msk;/* MMD */
527	u32 mdo_msk;
528	u32 mdi_msk;
529	u32 mdc_msk;
530};
531
532/* PHY bit set */
533static void bb_set(void *addr, u32 msk)
534{
535	iowrite32(ioread32(addr) | msk, addr);
536}
537
538/* PHY bit clear */
539static void bb_clr(void *addr, u32 msk)
540{
541	iowrite32((ioread32(addr) & ~msk), addr);
542}
543
544/* PHY bit read */
545static int bb_read(void *addr, u32 msk)
546{
547	return (ioread32(addr) & msk) != 0;
548}
549
550/* Data I/O pin control */
551static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
552{
553	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
554
555	if (bitbang->set_gate)
556		bitbang->set_gate(bitbang->addr);
557
558	if (bit)
559		bb_set(bitbang->addr, bitbang->mmd_msk);
560	else
561		bb_clr(bitbang->addr, bitbang->mmd_msk);
562}
563
564/* Set bit data*/
565static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
566{
567	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
568
569	if (bitbang->set_gate)
570		bitbang->set_gate(bitbang->addr);
571
572	if (bit)
573		bb_set(bitbang->addr, bitbang->mdo_msk);
574	else
575		bb_clr(bitbang->addr, bitbang->mdo_msk);
576}
577
578/* Get bit data*/
579static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
580{
581	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
582
583	if (bitbang->set_gate)
584		bitbang->set_gate(bitbang->addr);
585
586	return bb_read(bitbang->addr, bitbang->mdi_msk);
587}
588
589/* MDC pin control */
590static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
591{
592	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
593
594	if (bitbang->set_gate)
595		bitbang->set_gate(bitbang->addr);
596
597	if (bit)
598		bb_set(bitbang->addr, bitbang->mdc_msk);
599	else
600		bb_clr(bitbang->addr, bitbang->mdc_msk);
601}
602
603/* mdio bus control struct */
604static struct mdiobb_ops bb_ops = {
605	.owner = THIS_MODULE,
606	.set_mdc = sh_mdc_ctrl,
607	.set_mdio_dir = sh_mmd_ctrl,
608	.set_mdio_data = sh_set_mdio,
609	.get_mdio_data = sh_get_mdio,
610};
611
612/* free skb and descriptor buffer */
613static void sh_eth_ring_free(struct net_device *ndev)
614{
615	struct sh_eth_private *mdp = netdev_priv(ndev);
616	int i;
617
618	/* Free Rx skb ringbuffer */
619	if (mdp->rx_skbuff) {
620		for (i = 0; i < RX_RING_SIZE; i++) {
621			if (mdp->rx_skbuff[i])
622				dev_kfree_skb(mdp->rx_skbuff[i]);
623		}
624	}
625	kfree(mdp->rx_skbuff);
626
627	/* Free Tx skb ringbuffer */
628	if (mdp->tx_skbuff) {
629		for (i = 0; i < TX_RING_SIZE; i++) {
630			if (mdp->tx_skbuff[i])
631				dev_kfree_skb(mdp->tx_skbuff[i]);
632		}
633	}
634	kfree(mdp->tx_skbuff);
635}
636
637/* format skb and descriptor buffer */
638static void sh_eth_ring_format(struct net_device *ndev)
639{
640	struct sh_eth_private *mdp = netdev_priv(ndev);
641	int i;
642	struct sk_buff *skb;
643	struct sh_eth_rxdesc *rxdesc = NULL;
644	struct sh_eth_txdesc *txdesc = NULL;
645	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
646	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
647
648	mdp->cur_rx = mdp->cur_tx = 0;
649	mdp->dirty_rx = mdp->dirty_tx = 0;
650
651	memset(mdp->rx_ring, 0, rx_ringsize);
652
653	/* build Rx ring buffer */
654	for (i = 0; i < RX_RING_SIZE; i++) {
655		/* skb */
656		mdp->rx_skbuff[i] = NULL;
657		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
658		mdp->rx_skbuff[i] = skb;
659		if (skb == NULL)
660			break;
661		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
662				DMA_FROM_DEVICE);
663		sh_eth_set_receive_align(skb);
664
665		/* RX descriptor */
666		rxdesc = &mdp->rx_ring[i];
667		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
668		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
669
670		/* The buffer length is rounded up to a 16-byte boundary. */
671		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
672		/* Rx descriptor address set */
673		if (i == 0) {
674			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
675			if (sh_eth_is_gether(mdp))
676				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
677		}
678	}
679
680	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
681
682	/* Mark the last entry as wrapping the ring. */
683	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
684
685	memset(mdp->tx_ring, 0, tx_ringsize);
686
687	/* build Tx ring buffer */
688	for (i = 0; i < TX_RING_SIZE; i++) {
689		mdp->tx_skbuff[i] = NULL;
690		txdesc = &mdp->tx_ring[i];
691		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
692		txdesc->buffer_length = 0;
693		if (i == 0) {
694			/* Tx descriptor address set */
695			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
696			if (sh_eth_is_gether(mdp))
697				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
698		}
699	}
700
701	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
702}
703
704/* Get skb and descriptor buffer */
705static int sh_eth_ring_init(struct net_device *ndev)
706{
707	struct sh_eth_private *mdp = netdev_priv(ndev);
708	int rx_ringsize, tx_ringsize, ret = 0;
709
710	/*
711	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
712	 * card needs room to do 8 byte alignment, +2 so we can reserve
713	 * the first 2 bytes, and +16 gets room for the status word from the
714	 * card.
715	 */
716	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
717			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
718	if (mdp->cd->rpadir)
719		mdp->rx_buf_sz += NET_IP_ALIGN;
720
721	/* Allocate RX and TX skb rings */
722	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
723				GFP_KERNEL);
724	if (!mdp->rx_skbuff) {
725		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
726		ret = -ENOMEM;
727		return ret;
728	}
729
730	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
731				GFP_KERNEL);
732	if (!mdp->tx_skbuff) {
733		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
734		ret = -ENOMEM;
735		goto skb_ring_free;
736	}
737
738	/* Allocate all Rx descriptors. */
739	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
740	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
741			GFP_KERNEL);
742
743	if (!mdp->rx_ring) {
744		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
745			rx_ringsize);
746		ret = -ENOMEM;
747		goto skb_ring_free;
748	}
749
750	mdp->dirty_rx = 0;
751
752	/* Allocate all Tx descriptors. */
753	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
754	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
755			GFP_KERNEL);
756	if (!mdp->tx_ring) {
757		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
758			tx_ringsize);
759		ret = -ENOMEM;
760		goto desc_ring_free;
761	}
762	return ret;
763
764desc_ring_free:
765	/* free DMA buffer */
766	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
767
768skb_ring_free:
769	/* Free Rx and Tx skb ring buffer */
770	sh_eth_ring_free(ndev);
771
772	return ret;
773}
774
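/* Full hardware initialisation: soft reset, descriptor ring format, E-DMAC and E-MAC setup */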
775static int sh_eth_dev_init(struct net_device *ndev)
776{
777	int ret = 0;
778	struct sh_eth_private *mdp = netdev_priv(ndev);
779	u_int32_t rx_int_var, tx_int_var;
780	u32 val;
781
782	/* Soft Reset */
783	sh_eth_reset(ndev);
784
785	/* Descriptor format */
786	sh_eth_ring_format(ndev);
787	if (mdp->cd->rpadir)
788		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
789
790	/* all sh_eth int mask */
791	sh_eth_write(ndev, 0, EESIPR);
792
793#if defined(__LITTLE_ENDIAN__)
794	if (mdp->cd->hw_swap)
795		sh_eth_write(ndev, EDMR_EL, EDMR);
796	else
797#endif
798		sh_eth_write(ndev, 0, EDMR);
799
800	/* FIFO size set */
801	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
802	sh_eth_write(ndev, 0, TFTR);
803
804	/* Frame recv control */
805	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
806
807	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
808	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
809	sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
810
811	if (mdp->cd->bculr)
812		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
813
814	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
815
816	if (!mdp->cd->no_trimd)
817		sh_eth_write(ndev, 0, TRIMD);
818
819	/* Recv frame limit set register */
820	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
821		     RFLR);
822
823	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
824	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
825
826	/* PAUSE Prohibition */
827	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
828		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
829
830	sh_eth_write(ndev, val, ECMR);
831
832	if (mdp->cd->set_rate)
833		mdp->cd->set_rate(ndev);
834
835	/* E-MAC Status Register clear */
836	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
837
838	/* E-MAC Interrupt Enable register */
839	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
840
841	/* Set MAC address */
842	update_mac_address(ndev);
843
844	/* mask reset */
845	if (mdp->cd->apr)
846		sh_eth_write(ndev, APR_AP, APR);
847	if (mdp->cd->mpr)
848		sh_eth_write(ndev, MPR_MP, MPR);
849	if (mdp->cd->tpauser)
850		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
851
852	/* Setting the Rx mode will start the Rx process. */
853	sh_eth_write(ndev, EDRRR_R, EDRRR);
854
855	netif_start_queue(ndev);
856
857	return ret;
858}
859
860/* free Tx skb function */
861static int sh_eth_txfree(struct net_device *ndev)
862{
863	struct sh_eth_private *mdp = netdev_priv(ndev);
864	struct sh_eth_txdesc *txdesc;
865	int freeNum = 0;
866	int entry = 0;
867
868	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
869		entry = mdp->dirty_tx % TX_RING_SIZE;
870		txdesc = &mdp->tx_ring[entry];
871		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
872			break;
873		/* Free the original skb. */
874		if (mdp->tx_skbuff[entry]) {
875			dma_unmap_single(&ndev->dev, txdesc->addr,
876					 txdesc->buffer_length, DMA_TO_DEVICE);
877			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
878			mdp->tx_skbuff[entry] = NULL;
879			freeNum++;
880		}
881		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
882		if (entry >= TX_RING_SIZE - 1)
883			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
884
885		ndev->stats.tx_packets++;
886		ndev->stats.tx_bytes += txdesc->buffer_length;
887	}
888	return freeNum;
889}
890
891/* Packet receive function */
892static int sh_eth_rx(struct net_device *ndev)
893{
894	struct sh_eth_private *mdp = netdev_priv(ndev);
895	struct sh_eth_rxdesc *rxdesc;
896
897	int entry = mdp->cur_rx % RX_RING_SIZE;
898	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
899	struct sk_buff *skb;
900	u16 pkt_len = 0;
901	u32 desc_status;
902
903	rxdesc = &mdp->rx_ring[entry];
904	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
905		desc_status = edmac_to_cpu(mdp, rxdesc->status);
906		pkt_len = rxdesc->frame_length;
907
908		if (--boguscnt < 0)
909			break;
910
911		if (!(desc_status & RDFEND))
912			ndev->stats.rx_length_errors++;
913
914		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
915				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
916			ndev->stats.rx_errors++;
917			if (desc_status & RD_RFS1)
918				ndev->stats.rx_crc_errors++;
919			if (desc_status & RD_RFS2)
920				ndev->stats.rx_frame_errors++;
921			if (desc_status & RD_RFS3)
922				ndev->stats.rx_length_errors++;
923			if (desc_status & RD_RFS4)
924				ndev->stats.rx_length_errors++;
925			if (desc_status & RD_RFS6)
926				ndev->stats.rx_missed_errors++;
927			if (desc_status & RD_RFS10)
928				ndev->stats.rx_over_errors++;
929		} else {
930			if (!mdp->cd->hw_swap)
931				sh_eth_soft_swap(
932					phys_to_virt(ALIGN(rxdesc->addr, 4)),
933					pkt_len + 2);
934			skb = mdp->rx_skbuff[entry];
935			mdp->rx_skbuff[entry] = NULL;
936			if (mdp->cd->rpadir)
937				skb_reserve(skb, NET_IP_ALIGN);
938			skb_put(skb, pkt_len);
939			skb->protocol = eth_type_trans(skb, ndev);
940			netif_rx(skb);
941			ndev->stats.rx_packets++;
942			ndev->stats.rx_bytes += pkt_len;
943		}
944		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
945		entry = (++mdp->cur_rx) % RX_RING_SIZE;
946		rxdesc = &mdp->rx_ring[entry];
947	}
948
949	/* Refill the Rx ring buffers. */
950	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
951		entry = mdp->dirty_rx % RX_RING_SIZE;
952		rxdesc = &mdp->rx_ring[entry];
953		/* The buffer length is rounded up to a 16-byte boundary. */
954		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
955
956		if (mdp->rx_skbuff[entry] == NULL) {
957			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
958			mdp->rx_skbuff[entry] = skb;
959			if (skb == NULL)
960				break;	/* Better luck next round. */
961			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
962					DMA_FROM_DEVICE);
963			sh_eth_set_receive_align(skb);
964
965			skb_checksum_none_assert(skb);
966			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
967		}
968		if (entry >= RX_RING_SIZE - 1)
969			rxdesc->status |=
970				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
971		else
972			rxdesc->status |=
973				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
974	}
975
976	/* Restart Rx engine if stopped. */
977	/* If we don't need to check status, don't. -KDU */
978	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
979		sh_eth_write(ndev, EDRRR_R, EDRRR);
980
981	return 0;
982}
983
984static void sh_eth_rcv_snd_disable(struct net_device *ndev)
985{
986	/* disable tx and rx */
987	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
988		~(ECMR_RE | ECMR_TE), ECMR);
989}
990
991static void sh_eth_rcv_snd_enable(struct net_device *ndev)
992{
993	/* enable tx and rx */
994	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
995		(ECMR_RE | ECMR_TE), ECMR);
996}
997
998/* error control function */
999static void sh_eth_error(struct net_device *ndev, int intr_status)
1000{
1001	struct sh_eth_private *mdp = netdev_priv(ndev);
1002	u32 felic_stat;
1003	u32 link_stat;
1004	u32 mask;
1005
1006	if (intr_status & EESR_ECI) {
1007		felic_stat = sh_eth_read(ndev, ECSR);
1008		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1009		if (felic_stat & ECSR_ICD)
1010			ndev->stats.tx_carrier_errors++;
1011		if (felic_stat & ECSR_LCHNG) {
1012			/* Link Changed */
1013			if (mdp->cd->no_psr || mdp->no_ether_link) {
1014				if (mdp->link == PHY_DOWN)
1015					link_stat = 0;
1016				else
1017					link_stat = PHY_ST_LINK;
1018			} else {
1019				link_stat = (sh_eth_read(ndev, PSR));
1020				if (mdp->ether_link_active_low)
1021					link_stat = ~link_stat;
1022			}
1023			if (!(link_stat & PHY_ST_LINK))
1024				sh_eth_rcv_snd_disable(ndev);
1025			else {
1026				/* Link Up */
1027				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1028					  ~DMAC_M_ECI, EESIPR);
1029				/*clear int */
1030				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1031					  ECSR);
1032				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1033					  DMAC_M_ECI, EESIPR);
1034				/* enable tx and rx */
1035				sh_eth_rcv_snd_enable(ndev);
1036			}
1037		}
1038	}
1039
1040	if (intr_status & EESR_TWB) {
1041		/* Write back end. Unused write back interrupt */
1042		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
1043			ndev->stats.tx_aborted_errors++;
1044			if (netif_msg_tx_err(mdp))
1045				dev_err(&ndev->dev, "Transmit Abort\n");
		}
1046	}
1047
1048	if (intr_status & EESR_RABT) {
1049		/* Receive Abort int */
1050		if (intr_status & EESR_RFRMER) {
1051			/* Receive Frame Overflow int */
1052			ndev->stats.rx_frame_errors++;
1053			if (netif_msg_rx_err(mdp))
1054				dev_err(&ndev->dev, "Receive Abort\n");
1055		}
1056	}
1057
1058	if (intr_status & EESR_TDE) {
1059		/* Transmit Descriptor Empty int */
1060		ndev->stats.tx_fifo_errors++;
1061		if (netif_msg_tx_err(mdp))
1062			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1063	}
1064
1065	if (intr_status & EESR_TFE) {
1066		/* FIFO under flow */
1067		ndev->stats.tx_fifo_errors++;
1068		if (netif_msg_tx_err(mdp))
1069			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
1070	}
1071
1072	if (intr_status & EESR_RDE) {
1073		/* Receive Descriptor Empty int */
1074		ndev->stats.rx_over_errors++;
1075
1076		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
1077			sh_eth_write(ndev, EDRRR_R, EDRRR);
1078		if (netif_msg_rx_err(mdp))
1079			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1080	}
1081
1082	if (intr_status & EESR_RFE) {
1083		/* Receive FIFO Overflow int */
1084		ndev->stats.rx_fifo_errors++;
1085		if (netif_msg_rx_err(mdp))
1086			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1087	}
1088
1089	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1090		/* Address Error */
1091		ndev->stats.tx_fifo_errors++;
1092		if (netif_msg_tx_err(mdp))
1093			dev_err(&ndev->dev, "Address Error\n");
1094	}
1095
1096	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1097	if (mdp->cd->no_ade)
1098		mask &= ~EESR_ADE;
1099	if (intr_status & mask) {
1100		/* Tx error */
1101		u32 edtrr = sh_eth_read(ndev, EDTRR);
1102		/* dmesg */
1103		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
1104				intr_status, mdp->cur_tx);
1105		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1106				mdp->dirty_tx, (u32) ndev->state, edtrr);
1107		/* dirty buffer free */
1108		sh_eth_txfree(ndev);
1109
1110		/* SH7712 BUG */
1111		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1112			/* tx dma start */
1113			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1114		}
1115		/* wakeup */
1116		netif_wake_queue(ndev);
1117	}
1118}
1119
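/* Interrupt handler: acknowledge the EESR sources, then handle Rx, Tx completion and errors */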
1120static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1121{
1122	struct net_device *ndev = netdev;
1123	struct sh_eth_private *mdp = netdev_priv(ndev);
1124	struct sh_eth_cpu_data *cd = mdp->cd;
1125	irqreturn_t ret = IRQ_NONE;
1126	u32 intr_status = 0;
1127
1128	spin_lock(&mdp->lock);
1129
1130	/* Get interrupt status */
1131	intr_status = sh_eth_read(ndev, EESR);
1132	/* Clear interrupt */
1133	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
1134			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
1135			cd->tx_check | cd->eesr_err_check)) {
1136		sh_eth_write(ndev, intr_status, EESR);
1137		ret = IRQ_HANDLED;
1138	} else
1139		goto other_irq;
1140
1141	if (intr_status & (EESR_FRC | /* Frame recv*/
1142			EESR_RMAF | /* Multicast address recv*/
1143			EESR_RRF  | /* Bit frame recv */
1144			EESR_RTLF | /* Long frame recv*/
1145			EESR_RTSF | /* short frame recv */
1146			EESR_PRE  | /* PHY-LSI recv error */
1147			EESR_CERF)){ /* recv frame CRC error */
1148		sh_eth_rx(ndev);
1149	}
1150
1151	/* Tx Check */
1152	if (intr_status & cd->tx_check) {
1153		sh_eth_txfree(ndev);
1154		netif_wake_queue(ndev);
1155	}
1156
1157	if (intr_status & cd->eesr_err_check)
1158		sh_eth_error(ndev, intr_status);
1159
1160other_irq:
1161	spin_unlock(&mdp->lock);
1162
1163	return ret;
1164}
1165
1166static void sh_eth_timer(unsigned long data)
1167{
1168	struct net_device *ndev = (struct net_device *)data;
1169	struct sh_eth_private *mdp = netdev_priv(ndev);
1170
1171	mod_timer(&mdp->timer, jiffies + (10 * HZ));
1172}
1173
1174/* PHY state control function */
1175static void sh_eth_adjust_link(struct net_device *ndev)
1176{
1177	struct sh_eth_private *mdp = netdev_priv(ndev);
1178	struct phy_device *phydev = mdp->phydev;
1179	int new_state = 0;
1180
1181	if (phydev->link != PHY_DOWN) {
1182		if (phydev->duplex != mdp->duplex) {
1183			new_state = 1;
1184			mdp->duplex = phydev->duplex;
1185			if (mdp->cd->set_duplex)
1186				mdp->cd->set_duplex(ndev);
1187		}
1188
1189		if (phydev->speed != mdp->speed) {
1190			new_state = 1;
1191			mdp->speed = phydev->speed;
1192			if (mdp->cd->set_rate)
1193				mdp->cd->set_rate(ndev);
1194		}
1195		if (mdp->link == PHY_DOWN) {
1196			sh_eth_write(ndev,
1197				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
1198			new_state = 1;
1199			mdp->link = phydev->link;
1200		}
1201	} else if (mdp->link) {
1202		new_state = 1;
1203		mdp->link = PHY_DOWN;
1204		mdp->speed = 0;
1205		mdp->duplex = -1;
1206	}
1207
1208	if (new_state && netif_msg_link(mdp))
1209		phy_print_status(phydev);
1210}
1211
1212/* PHY init function */
1213static int sh_eth_phy_init(struct net_device *ndev)
1214{
1215	struct sh_eth_private *mdp = netdev_priv(ndev);
1216	char phy_id[MII_BUS_ID_SIZE + 3];
1217	struct phy_device *phydev = NULL;
1218
1219	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1220		mdp->mii_bus->id , mdp->phy_id);
1221
1222	mdp->link = PHY_DOWN;
1223	mdp->speed = 0;
1224	mdp->duplex = -1;
1225
1226	/* Try connect to PHY */
1227	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1228				0, mdp->phy_interface);
1229	if (IS_ERR(phydev)) {
1230		dev_err(&ndev->dev, "phy_connect failed\n");
1231		return PTR_ERR(phydev);
1232	}
1233
1234	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
1235		phydev->addr, phydev->drv->name);
1236
1237	mdp->phydev = phydev;
1238
1239	return 0;
1240}
1241
1242/* PHY control start function */
1243static int sh_eth_phy_start(struct net_device *ndev)
1244{
1245	struct sh_eth_private *mdp = netdev_priv(ndev);
1246	int ret;
1247
1248	ret = sh_eth_phy_init(ndev);
1249	if (ret)
1250		return ret;
1251
1252	/* reset phy - this also wakes it from PDOWN */
1253	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
1254	phy_start(mdp->phydev);
1255
1256	return 0;
1257}
1258
1259static int sh_eth_get_settings(struct net_device *ndev,
1260			struct ethtool_cmd *ecmd)
1261{
1262	struct sh_eth_private *mdp = netdev_priv(ndev);
1263	unsigned long flags;
1264	int ret;
1265
1266	spin_lock_irqsave(&mdp->lock, flags);
1267	ret = phy_ethtool_gset(mdp->phydev, ecmd);
1268	spin_unlock_irqrestore(&mdp->lock, flags);
1269
1270	return ret;
1271}
1272
1273static int sh_eth_set_settings(struct net_device *ndev,
1274		struct ethtool_cmd *ecmd)
1275{
1276	struct sh_eth_private *mdp = netdev_priv(ndev);
1277	unsigned long flags;
1278	int ret;
1279
1280	spin_lock_irqsave(&mdp->lock, flags);
1281
1282	/* disable tx and rx */
1283	sh_eth_rcv_snd_disable(ndev);
1284
1285	ret = phy_ethtool_sset(mdp->phydev, ecmd);
1286	if (ret)
1287		goto error_exit;
1288
1289	if (ecmd->duplex == DUPLEX_FULL)
1290		mdp->duplex = 1;
1291	else
1292		mdp->duplex = 0;
1293
1294	if (mdp->cd->set_duplex)
1295		mdp->cd->set_duplex(ndev);
1296
1297error_exit:
1298	mdelay(1);
1299
1300	/* enable tx and rx */
1301	sh_eth_rcv_snd_enable(ndev);
1302
1303	spin_unlock_irqrestore(&mdp->lock, flags);
1304
1305	return ret;
1306}
1307
1308static int sh_eth_nway_reset(struct net_device *ndev)
1309{
1310	struct sh_eth_private *mdp = netdev_priv(ndev);
1311	unsigned long flags;
1312	int ret;
1313
1314	spin_lock_irqsave(&mdp->lock, flags);
1315	ret = phy_start_aneg(mdp->phydev);
1316	spin_unlock_irqrestore(&mdp->lock, flags);
1317
1318	return ret;
1319}
1320
1321static u32 sh_eth_get_msglevel(struct net_device *ndev)
1322{
1323	struct sh_eth_private *mdp = netdev_priv(ndev);
1324	return mdp->msg_enable;
1325}
1326
1327static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1328{
1329	struct sh_eth_private *mdp = netdev_priv(ndev);
1330	mdp->msg_enable = value;
1331}
1332
1333static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1334	"rx_current", "tx_current",
1335	"rx_dirty", "tx_dirty",
1336};
1337#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1338
1339static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1340{
1341	switch (sset) {
1342	case ETH_SS_STATS:
1343		return SH_ETH_STATS_LEN;
1344	default:
1345		return -EOPNOTSUPP;
1346	}
1347}
1348
1349static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1350			struct ethtool_stats *stats, u64 *data)
1351{
1352	struct sh_eth_private *mdp = netdev_priv(ndev);
1353	int i = 0;
1354
1355	/* device-specific stats */
1356	data[i++] = mdp->cur_rx;
1357	data[i++] = mdp->cur_tx;
1358	data[i++] = mdp->dirty_rx;
1359	data[i++] = mdp->dirty_tx;
1360}
1361
1362static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1363{
1364	switch (stringset) {
1365	case ETH_SS_STATS:
1366		memcpy(data, *sh_eth_gstrings_stats,
1367					sizeof(sh_eth_gstrings_stats));
1368		break;
1369	}
1370}
1371
1372static const struct ethtool_ops sh_eth_ethtool_ops = {
1373	.get_settings	= sh_eth_get_settings,
1374	.set_settings	= sh_eth_set_settings,
1375	.nway_reset	= sh_eth_nway_reset,
1376	.get_msglevel	= sh_eth_get_msglevel,
1377	.set_msglevel	= sh_eth_set_msglevel,
1378	.get_link	= ethtool_op_get_link,
1379	.get_strings	= sh_eth_get_strings,
1380	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
1381	.get_sset_count     = sh_eth_get_sset_count,
1382};
1383
1384/* network device open function */
1385static int sh_eth_open(struct net_device *ndev)
1386{
1387	int ret = 0;
1388	struct sh_eth_private *mdp = netdev_priv(ndev);
1389
1390	pm_runtime_get_sync(&mdp->pdev->dev);
1391
1392	ret = request_irq(ndev->irq, sh_eth_interrupt,
1393#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1394	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1395	defined(CONFIG_CPU_SUBTYPE_SH7757)
1396				IRQF_SHARED,
1397#else
1398				0,
1399#endif
1400				ndev->name, ndev);
1401	if (ret) {
1402		dev_err(&ndev->dev, "Can not assign IRQ number\n");
1403		return ret;
1404	}
1405
1406	/* Descriptor set */
1407	ret = sh_eth_ring_init(ndev);
1408	if (ret)
1409		goto out_free_irq;
1410
1411	/* device init */
1412	ret = sh_eth_dev_init(ndev);
1413	if (ret)
1414		goto out_free_irq;
1415
1416	/* PHY control start*/
1417	ret = sh_eth_phy_start(ndev);
1418	if (ret)
1419		goto out_free_irq;
1420
1421	/* Set the timer to check for link beat. */
1422	init_timer(&mdp->timer);
1423	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
1424	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
1425
1426	return ret;
1427
1428out_free_irq:
1429	free_irq(ndev->irq, ndev);
1430	pm_runtime_put_sync(&mdp->pdev->dev);
1431	return ret;
1432}
1433
1434/* Timeout function */
1435static void sh_eth_tx_timeout(struct net_device *ndev)
1436{
1437	struct sh_eth_private *mdp = netdev_priv(ndev);
1438	struct sh_eth_rxdesc *rxdesc;
1439	int i;
1440
1441	netif_stop_queue(ndev);
1442
1443	if (netif_msg_timer(mdp))
1444		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1445	       " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1446
1447	/* tx_errors count up */
1448	ndev->stats.tx_errors++;
1449
1450	/* timer off */
1451	del_timer_sync(&mdp->timer);
1452
1453	/* Free all the skbuffs in the Rx queue. */
1454	for (i = 0; i < RX_RING_SIZE; i++) {
1455		rxdesc = &mdp->rx_ring[i];
1456		rxdesc->status = 0;
1457		rxdesc->addr = 0xBADF00D0;
1458		if (mdp->rx_skbuff[i])
1459			dev_kfree_skb(mdp->rx_skbuff[i]);
1460		mdp->rx_skbuff[i] = NULL;
1461	}
1462	for (i = 0; i < TX_RING_SIZE; i++) {
1463		if (mdp->tx_skbuff[i])
1464			dev_kfree_skb(mdp->tx_skbuff[i]);
1465		mdp->tx_skbuff[i] = NULL;
1466	}
1467
1468	/* device init */
1469	sh_eth_dev_init(ndev);
1470
1471	/* timer on */
1472	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
1473	add_timer(&mdp->timer);
1474}
1475
1476/* Packet transmit function */
1477static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1478{
1479	struct sh_eth_private *mdp = netdev_priv(ndev);
1480	struct sh_eth_txdesc *txdesc;
1481	u32 entry;
1482	unsigned long flags;
1483
1484	spin_lock_irqsave(&mdp->lock, flags);
1485	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1486		if (!sh_eth_txfree(ndev)) {
1487			if (netif_msg_tx_queued(mdp))
1488				dev_warn(&ndev->dev, "TxFD exhausted.\n");
1489			netif_stop_queue(ndev);
1490			spin_unlock_irqrestore(&mdp->lock, flags);
1491			return NETDEV_TX_BUSY;
1492		}
1493	}
1494	spin_unlock_irqrestore(&mdp->lock, flags);
1495
1496	entry = mdp->cur_tx % TX_RING_SIZE;
1497	mdp->tx_skbuff[entry] = skb;
1498	txdesc = &mdp->tx_ring[entry];
1499	/* soft swap. */
1500	if (!mdp->cd->hw_swap)
1501		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
1502				 skb->len + 2);
1503	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
1504				      DMA_TO_DEVICE);
1505	if (skb->len < ETHERSMALL)
1506		txdesc->buffer_length = ETHERSMALL;
1507	else
1508		txdesc->buffer_length = skb->len;
1509
1510	if (entry >= TX_RING_SIZE - 1)
1511		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
1512	else
1513		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
1514
1515	mdp->cur_tx++;
1516
1517	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
1518		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1519
1520	return NETDEV_TX_OK;
1521}
1522
1523/* device close function */
1524static int sh_eth_close(struct net_device *ndev)
1525{
1526	struct sh_eth_private *mdp = netdev_priv(ndev);
1527	int ringsize;
1528
1529	netif_stop_queue(ndev);
1530
1531	/* Disable interrupts by clearing the interrupt mask. */
1532	sh_eth_write(ndev, 0x0000, EESIPR);
1533
1534	/* Stop the chip's Tx and Rx processes. */
1535	sh_eth_write(ndev, 0, EDTRR);
1536	sh_eth_write(ndev, 0, EDRRR);
1537
1538	/* PHY Disconnect */
1539	if (mdp->phydev) {
1540		phy_stop(mdp->phydev);
1541		phy_disconnect(mdp->phydev);
1542	}
1543
1544	free_irq(ndev->irq, ndev);
1545
1546	del_timer_sync(&mdp->timer);
1547
1548	/* Free all the skbuffs in the Rx queue. */
1549	sh_eth_ring_free(ndev);
1550
1551	/* free DMA buffer */
1552	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
1553	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1554
1555	/* free DMA buffer */
1556	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
1557	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
1558
1559	pm_runtime_put_sync(&mdp->pdev->dev);
1560
1561	return 0;
1562}
1563
1564static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1565{
1566	struct sh_eth_private *mdp = netdev_priv(ndev);
1567
1568	pm_runtime_get_sync(&mdp->pdev->dev);
1569
1570	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1571	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
1572	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
1573	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
1574	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1575	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
1576	if (sh_eth_is_gether(mdp)) {
1577		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
1578		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
1579		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
1580		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
1581	} else {
1582		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1583		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
1584	}
1585	pm_runtime_put_sync(&mdp->pdev->dev);
1586
1587	return &ndev->stats;
1588}
1589
1590/* ioctl to device function */
1591static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1592				int cmd)
1593{
1594	struct sh_eth_private *mdp = netdev_priv(ndev);
1595	struct phy_device *phydev = mdp->phydev;
1596
1597	if (!netif_running(ndev))
1598		return -EINVAL;
1599
1600	if (!phydev)
1601		return -ENODEV;
1602
1603	return phy_mii_ioctl(phydev, rq, cmd);
1604}
1605
1606#if defined(SH_ETH_HAS_TSU)
1607/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields: each 32-bit TSU_POST register packs eight 4-bit fields, one per CAM entry. */
1608static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
1609					    int entry)
1610{
1611	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
1612}
1613
1614static u32 sh_eth_tsu_get_post_mask(int entry)
1615{
1616	return 0x0f << (28 - ((entry % 8) * 4));
1617}
1618
1619static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
1620{
1621	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
1622}
1623
1624static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
1625					     int entry)
1626{
1627	struct sh_eth_private *mdp = netdev_priv(ndev);
1628	u32 tmp;
1629	void *reg_offset;
1630
1631	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
1632	tmp = ioread32(reg_offset);
1633	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
1634}
1635
1636static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
1637					      int entry)
1638{
1639	struct sh_eth_private *mdp = netdev_priv(ndev);
1640	u32 post_mask, ref_mask, tmp;
1641	void *reg_offset;
1642
1643	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
1644	post_mask = sh_eth_tsu_get_post_mask(entry);
1645	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
1646
1647	tmp = ioread32(reg_offset);
1648	iowrite32(tmp & ~post_mask, reg_offset);
1649
1650	/* If the other port still has this entry enabled, return true */
1651	return tmp & ref_mask;
1652}
1653
1654static int sh_eth_tsu_busy(struct net_device *ndev)
1655{
1656	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
1657	struct sh_eth_private *mdp = netdev_priv(ndev);
1658
1659	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
1660		udelay(10);
1661		timeout--;
1662		if (timeout <= 0) {
1663			dev_err(&ndev->dev, "%s: timeout\n", __func__);
1664			return -ETIMEDOUT;
1665		}
1666	}
1667
1668	return 0;
1669}
1670
1671static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
1672				  const u8 *addr)
1673{
1674	u32 val;
1675
1676	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
1677	iowrite32(val, reg);
1678	if (sh_eth_tsu_busy(ndev) < 0)
1679		return -EBUSY;
1680
1681	val = addr[4] << 8 | addr[5];
1682	iowrite32(val, reg + 4);
1683	if (sh_eth_tsu_busy(ndev) < 0)
1684		return -EBUSY;
1685
1686	return 0;
1687}
1688
1689static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
1690{
1691	u32 val;
1692
1693	val = ioread32(reg);
1694	addr[0] = (val >> 24) & 0xff;
1695	addr[1] = (val >> 16) & 0xff;
1696	addr[2] = (val >> 8) & 0xff;
1697	addr[3] = val & 0xff;
1698	val = ioread32(reg + 4);
1699	addr[4] = (val >> 8) & 0xff;
1700	addr[5] = val & 0xff;
1701}
1702
1703
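/* Look up a MAC address in the TSU CAM table; returns the entry index or -ENOENT */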
1704static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
1705{
1706	struct sh_eth_private *mdp = netdev_priv(ndev);
1707	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1708	int i;
1709	u8 c_addr[ETH_ALEN];
1710
1711	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
1712		sh_eth_tsu_read_entry(reg_offset, c_addr);
1713		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
1714			return i;
1715	}
1716
1717	return -ENOENT;
1718}
1719
1720static int sh_eth_tsu_find_empty(struct net_device *ndev)
1721{
1722	u8 blank[ETH_ALEN];
1723	int entry;
1724
1725	memset(blank, 0, sizeof(blank));
1726	entry = sh_eth_tsu_find_entry(ndev, blank);
1727	return (entry < 0) ? -ENOMEM : entry;
1728}
1729
1730static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
1731					      int entry)
1732{
1733	struct sh_eth_private *mdp = netdev_priv(ndev);
1734	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1735	int ret;
1736	u8 blank[ETH_ALEN];
1737
1738	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
1739			 ~(1 << (31 - entry)), TSU_TEN);
1740
1741	memset(blank, 0, sizeof(blank));
1742	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
1743	if (ret < 0)
1744		return ret;
1745	return 0;
1746}
1747
1748static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
1749{
1750	struct sh_eth_private *mdp = netdev_priv(ndev);
1751	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1752	int i, ret;
1753
1754	if (!mdp->cd->tsu)
1755		return 0;
1756
1757	i = sh_eth_tsu_find_entry(ndev, addr);
1758	if (i < 0) {
1759		/* No entry found, create one */
1760		i = sh_eth_tsu_find_empty(ndev);
1761		if (i < 0)
1762			return -ENOMEM;
1763		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
1764		if (ret < 0)
1765			return ret;
1766
1767		/* Enable the entry */
1768		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
1769				 (1 << (31 - i)), TSU_TEN);
1770	}
1771
1772	/* Entry found or created, enable POST */
1773	sh_eth_tsu_enable_cam_entry_post(ndev, i);
1774
1775	return 0;
1776}
1777
1778static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
1779{
1780	struct sh_eth_private *mdp = netdev_priv(ndev);
1781	int i, ret;
1782
1783	if (!mdp->cd->tsu)
1784		return 0;
1785
1786	i = sh_eth_tsu_find_entry(ndev, addr);
1787	if (i >= 0) {
1788		/* Entry found */
1789		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
1790			goto done;
1791
1792		/* Disable the entry if both ports were disabled */
1793		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
1794		if (ret < 0)
1795			return ret;
1796	}
1797done:
1798	return 0;
1799}
1800
1801static int sh_eth_tsu_purge_all(struct net_device *ndev)
1802{
1803	struct sh_eth_private *mdp = netdev_priv(ndev);
1804	int i, ret;
1805
1806	if (unlikely(!mdp->cd->tsu))
1807		return 0;
1808
1809	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
1810		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
1811			continue;
1812
1813		/* Disable the entry if both ports were disabled */
1814		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
1815		if (ret < 0)
1816			return ret;
1817	}
1818
1819	return 0;
1820}
1821
1822static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
1823{
1824	struct sh_eth_private *mdp = netdev_priv(ndev);
1825	u8 addr[ETH_ALEN];
1826	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
1827	int i;
1828
1829	if (unlikely(!mdp->cd->tsu))
1830		return;
1831
1832	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
1833		sh_eth_tsu_read_entry(reg_offset, addr);
1834		if (is_multicast_ether_addr(addr))
1835			sh_eth_tsu_del_entry(ndev, addr);
1836	}
1837}
1838
1839/* Set the multicast reception mode */
1840static void sh_eth_set_multicast_list(struct net_device *ndev)
1841{
1842	struct sh_eth_private *mdp = netdev_priv(ndev);
1843	u32 ecmr_bits;
1844	int mcast_all = 0;
1845	unsigned long flags;
1846
1847	spin_lock_irqsave(&mdp->lock, flags);
1848	/*
1849	 * Initial condition is MCT = 1, PRM = 0.
1850	 * Depending on ndev->flags, set PRM or clear MCT
1851	 */
1852	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
1853
1854	if (!(ndev->flags & IFF_MULTICAST)) {
1855		sh_eth_tsu_purge_mcast(ndev);
1856		mcast_all = 1;
1857	}
1858	if (ndev->flags & IFF_ALLMULTI) {
1859		sh_eth_tsu_purge_mcast(ndev);
1860		ecmr_bits &= ~ECMR_MCT;
1861		mcast_all = 1;
1862	}
1863
1864	if (ndev->flags & IFF_PROMISC) {
1865		sh_eth_tsu_purge_all(ndev);
1866		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
1867	} else if (mdp->cd->tsu) {
1868		struct netdev_hw_addr *ha;
1869		netdev_for_each_mc_addr(ha, ndev) {
1870			if (mcast_all && is_multicast_ether_addr(ha->addr))
1871				continue;
1872
1873			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
1874				if (!mcast_all) {
1875					sh_eth_tsu_purge_mcast(ndev);
1876					ecmr_bits &= ~ECMR_MCT;
1877					mcast_all = 1;
1878				}
1879			}
1880		}
1881	} else {
1882		/* Normal, unicast/broadcast-only mode. */
1883		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
1884	}
1885
1886	/* update the ethernet mode */
1887	sh_eth_write(ndev, ecmr_bits, ECMR);
1888
1889	spin_unlock_irqrestore(&mdp->lock, flags);
1890}
1891
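/* Each port has its own VLAN tag register: TSU_VTAG0 for port 0, TSU_VTAG1 for port 1 */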
1892static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
1893{
1894	if (!mdp->port)
1895		return TSU_VTAG0;
1896	else
1897		return TSU_VTAG1;
1898}
1899
1900static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1901{
1902	struct sh_eth_private *mdp = netdev_priv(ndev);
1903	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
1904
1905	if (unlikely(!mdp->cd->tsu))
1906		return -EPERM;
1907
1908	/* No filtering if vid = 0 */
1909	if (!vid)
1910		return 0;
1911
1912	mdp->vlan_num_ids++;
1913
1914	/*
1915	 * The controller has only one VLAN tag HW filter. So, when a second
1916	 * VID is added, the driver disables the filter and all tags pass.
1917	 */
1918	if (mdp->vlan_num_ids > 1) {
1919		/* disable VLAN filter */
1920		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
1921		return 0;
1922	}
1923
1924	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
1925			 vtag_reg_index);
1926
1927	return 0;
1928}
1929
1930static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1931{
1932	struct sh_eth_private *mdp = netdev_priv(ndev);
1933	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
1934
1935	if (unlikely(!mdp->cd->tsu))
1936		return -EPERM;
1937
1938	/* No filtering if vid = 0 */
1939	if (!vid)
1940		return 0;
1941
1942	mdp->vlan_num_ids--;
1943	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
1944
1945	return 0;
1946}
1947#endif /* SH_ETH_HAS_TSU */
1948
1949/* SuperH's TSU register init function */
1950static void sh_eth_tsu_init(struct sh_eth_private *mdp)
1951{
1952	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
1953	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
1954	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
1955	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
1956	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
1957	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
1958	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
1959	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
1960	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
1961	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
1962	if (sh_eth_is_gether(mdp)) {
1963		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
1964		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
1965	} else {
1966		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
1967		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
1968	}
1969	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
1970	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
1971	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
1972	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
1973	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
1974	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
1975	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
1976}
1977
1978/* MDIO bus release function */
1979static int sh_mdio_release(struct net_device *ndev)
1980{
1981	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
1982
1983	/* unregister mdio bus */
1984	mdiobus_unregister(bus);
1985
1986	/* remove mdio bus info from net_device */
1987	dev_set_drvdata(&ndev->dev, NULL);
1988
1989	/* free interrupts memory */
1990	kfree(bus->irq);
1991
1992	/* free bitbang info */
1993	free_mdio_bitbang(bus);
1994
1995	return 0;
1996}
1997
1998/* MDIO bus init function */
1999static int sh_mdio_init(struct net_device *ndev, int id,
2000			struct sh_eth_plat_data *pd)
2001{
2002	int ret, i;
2003	struct bb_info *bitbang;
2004	struct sh_eth_private *mdp = netdev_priv(ndev);
2005
2006	/* create bit control struct for PHY */
2007	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
2008	if (!bitbang) {
2009		ret = -ENOMEM;
2010		goto out;
2011	}
2012
2013	/* bitbang init */
2014	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2015	bitbang->set_gate = pd->set_mdio_gate;
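	/*
	 * Bit positions within the PIR register used for bit-banged MDIO:
	 * bit 3 = MDI (data in), bit 2 = MDO (data out),
	 * bit 1 = MMD (pin direction control), bit 0 = MDC (clock).
	 */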
2016	bitbang->mdi_msk = 0x08;
2017	bitbang->mdo_msk = 0x04;
2018	bitbang->mmd_msk = 0x02;/* MMD */
2019	bitbang->mdc_msk = 0x01;
2020	bitbang->ctrl.ops = &bb_ops;
2021
2022	/* MII controller setting */
2023	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2024	if (!mdp->mii_bus) {
2025		ret = -ENOMEM;
2026		goto out_free_bitbang;
2027	}
2028
2029	/* Hook up MII support for ethtool */
2030	mdp->mii_bus->name = "sh_mii";
2031	mdp->mii_bus->parent = &ndev->dev;
2032	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2033		mdp->pdev->name, id);
2034
2035	/* PHY IRQ */
2036	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
2037	if (!mdp->mii_bus->irq) {
2038		ret = -ENOMEM;
2039		goto out_free_bus;
2040	}
2041
2042	for (i = 0; i < PHY_MAX_ADDR; i++)
2043		mdp->mii_bus->irq[i] = PHY_POLL;
2044
	/* register the mdio bus */
2046	ret = mdiobus_register(mdp->mii_bus);
2047	if (ret)
2048		goto out_free_irq;
2049
2050	dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2051
2052	return 0;
2053
2054out_free_irq:
2055	kfree(mdp->mii_bus->irq);
2056
2057out_free_bus:
2058	free_mdio_bitbang(mdp->mii_bus);
2059
2060out_free_bitbang:
2061	kfree(bitbang);
2062
2063out:
2064	return ret;
2065}
2066
2067static const u16 *sh_eth_get_register_offset(int register_type)
2068{
2069	const u16 *reg_offset = NULL;
2070
2071	switch (register_type) {
2072	case SH_ETH_REG_GIGABIT:
2073		reg_offset = sh_eth_offset_gigabit;
2074		break;
2075	case SH_ETH_REG_FAST_SH4:
2076		reg_offset = sh_eth_offset_fast_sh4;
2077		break;
2078	case SH_ETH_REG_FAST_SH3_SH2:
2079		reg_offset = sh_eth_offset_fast_sh3_sh2;
2080		break;
2081	default:
2082		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
2083		break;
2084	}
2085
2086	return reg_offset;
2087}
2088
2089static const struct net_device_ops sh_eth_netdev_ops = {
2090	.ndo_open		= sh_eth_open,
2091	.ndo_stop		= sh_eth_close,
2092	.ndo_start_xmit		= sh_eth_start_xmit,
2093	.ndo_get_stats		= sh_eth_get_stats,
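	/* multicast and VLAN filtering are handled by the TSU block */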
2094#if defined(SH_ETH_HAS_TSU)
2095	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
2096	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
2097	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
2098#endif
2099	.ndo_tx_timeout		= sh_eth_tx_timeout,
2100	.ndo_do_ioctl		= sh_eth_do_ioctl,
2101	.ndo_validate_addr	= eth_validate_addr,
2102	.ndo_set_mac_address	= eth_mac_addr,
2103	.ndo_change_mtu		= eth_change_mtu,
2104};
2105
2106static int sh_eth_drv_probe(struct platform_device *pdev)
2107{
2108	int ret, devno = 0;
2109	struct resource *res;
2110	struct net_device *ndev = NULL;
2111	struct sh_eth_private *mdp = NULL;
2112	struct sh_eth_plat_data *pd;
2113
2114	/* get base addr */
2115	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2116	if (unlikely(res == NULL)) {
2117		dev_err(&pdev->dev, "invalid resource\n");
2118		ret = -EINVAL;
2119		goto out;
2120	}
2121
2122	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2123	if (!ndev) {
2124		ret = -ENOMEM;
2125		goto out;
2126	}
2127
	/* Fill in the SH Ether-specific entries of the device structure. */
2129	ndev->base_addr = res->start;
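	/* pdev->id is -1 for a single, unnumbered device; treat it as 0 */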
2130	devno = pdev->id;
2131	if (devno < 0)
2132		devno = 0;
2133
2134	ndev->dma = -1;
2135	ret = platform_get_irq(pdev, 0);
2136	if (ret < 0) {
2137		ret = -ENODEV;
2138		goto out_release;
2139	}
2140	ndev->irq = ret;
2141
2142	SET_NETDEV_DEV(ndev, &pdev->dev);
2143
2144	/* Fill in the fields of the device structure with ethernet values. */
2145	ether_setup(ndev);
2146
2147	mdp = netdev_priv(ndev);
2148	mdp->addr = ioremap(res->start, resource_size(res));
2149	if (mdp->addr == NULL) {
2150		ret = -ENOMEM;
2151		dev_err(&pdev->dev, "ioremap failed.\n");
2152		goto out_release;
2153	}
2154
2155	spin_lock_init(&mdp->lock);
2156	mdp->pdev = pdev;
2157	pm_runtime_enable(&pdev->dev);
2158	pm_runtime_resume(&pdev->dev);
2159
2160	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
2161	/* get PHY ID */
2162	mdp->phy_id = pd->phy;
2163	mdp->phy_interface = pd->phy_interface;
2164	/* EDMAC endian */
2165	mdp->edmac_endian = pd->edmac_endian;
2166	mdp->no_ether_link = pd->no_ether_link;
2167	mdp->ether_link_active_low = pd->ether_link_active_low;
2168	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
2169
2170	/* set cpu data */
2171#if defined(SH_ETH_HAS_BOTH_MODULES)
2172	mdp->cd = sh_eth_get_cpu_data(mdp);
2173#else
2174	mdp->cd = &sh_eth_my_cpu_data;
2175#endif
2176	sh_eth_set_default_cpu_data(mdp->cd);
2177
2178	/* set function */
2179	ndev->netdev_ops = &sh_eth_netdev_ops;
2180	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2181	ndev->watchdog_timeo = TX_TIMEOUT;
2182
2183	/* debug message level */
2184	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
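	/*
	 * The POST_RX/POST_FW defaults appear to hold a 2-bit field per
	 * channel; shift right by (devno * 2) to pick out this channel's
	 * field.
	 */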
2185	mdp->post_rx = POST_RX >> (devno << 1);
2186	mdp->post_fw = POST_FW >> (devno << 1);
2187
2188	/* read and set MAC address */
2189	read_mac_address(ndev, pd->mac_addr);
2190
2191	/* ioremap the TSU registers */
2192	if (mdp->cd->tsu) {
2193		struct resource *rtsu;
2194		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "TSU resource not found\n");
			ret = -ENODEV;
			goto out_release;
		}
		mdp->tsu_addr = ioremap(rtsu->start,
					resource_size(rtsu));
		if (mdp->tsu_addr == NULL) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "TSU ioremap failed.\n");
			goto out_release;
		}
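		/*
		 * The TSU block is shared between a pair of channels;
		 * even/odd device numbers select TSU port 0/1.
		 */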
2201		mdp->port = devno % 2;
2202		ndev->features = NETIF_F_HW_VLAN_FILTER;
2203	}
2204
2205	/* initialize first or needed device */
2206	if (!devno || pd->needs_init) {
2207		if (mdp->cd->chip_reset)
2208			mdp->cd->chip_reset(ndev);
2209
2210		if (mdp->cd->tsu) {
			/* one-time TSU register init */
2212			sh_eth_tsu_init(mdp);
2213		}
2214	}
2215
2216	/* network device register */
2217	ret = register_netdev(ndev);
2218	if (ret)
2219		goto out_release;
2220
2221	/* mdio bus init */
2222	ret = sh_mdio_init(ndev, pdev->id, pd);
2223	if (ret)
2224		goto out_unregister;
2225
2226	/* print device information */
2227	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
2228	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2229
2230	platform_set_drvdata(pdev, ndev);
2231
2232	return ret;
2233
2234out_unregister:
2235	unregister_netdev(ndev);
2236
2237out_release:
	/* unwind: unmap registers and free the net_device */
2239	if (mdp && mdp->addr)
2240		iounmap(mdp->addr);
2241	if (mdp && mdp->tsu_addr)
2242		iounmap(mdp->tsu_addr);
2243	if (ndev)
2244		free_netdev(ndev);
2245
2246out:
2247	return ret;
2248}
2249
2250static int sh_eth_drv_remove(struct platform_device *pdev)
2251{
2252	struct net_device *ndev = platform_get_drvdata(pdev);
2253	struct sh_eth_private *mdp = netdev_priv(ndev);
2254
2255	if (mdp->cd->tsu)
2256		iounmap(mdp->tsu_addr);
2257	sh_mdio_release(ndev);
2258	unregister_netdev(ndev);
2259	pm_runtime_disable(&pdev->dev);
2260	iounmap(mdp->addr);
2261	free_netdev(ndev);
2262	platform_set_drvdata(pdev, NULL);
2263
2264	return 0;
2265}
2266
2267static int sh_eth_runtime_nop(struct device *dev)
2268{
2269	/*
2270	 * Runtime PM callback shared between ->runtime_suspend()
2271	 * and ->runtime_resume(). Simply returns success.
2272	 *
2273	 * This driver re-initializes all registers after
2274	 * pm_runtime_get_sync() anyway so there is no need
2275	 * to save and restore registers here.
2276	 */
2277	return 0;
2278}
2279
2280static struct dev_pm_ops sh_eth_dev_pm_ops = {
2281	.runtime_suspend = sh_eth_runtime_nop,
2282	.runtime_resume = sh_eth_runtime_nop,
2283};
2284
2285static struct platform_driver sh_eth_driver = {
2286	.probe = sh_eth_drv_probe,
2287	.remove = sh_eth_drv_remove,
2288	.driver = {
2289		   .name = CARDNAME,
2290		   .pm = &sh_eth_dev_pm_ops,
2291	},
2292};
2293
2294module_platform_driver(sh_eth_driver);
2295
2296MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2297MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2298MODULE_LICENSE("GPL v2");
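
/*
 * Illustrative sketch only (not part of the driver): a board file would
 * typically describe each channel with sh_eth_plat_data and register a
 * platform device named CARDNAME, roughly along these lines.  The PHY
 * address, register type and resource table below are made-up placeholders
 * and must be replaced with board-specific values.
 *
 *	static struct sh_eth_plat_data my_sh_eth_pdata = {
 *		.phy		= 0x1f,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *	};
 *
 *	static struct platform_device my_sh_eth_device = {
 *		.name		= CARDNAME,
 *		.id		= 0,
 *		.dev		= {
 *			.platform_data = &my_sh_eth_pdata,
 *		},
 *		.resource	= my_sh_eth_resources,
 *		.num_resources	= ARRAY_SIZE(my_sh_eth_resources),
 *	};
 */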
2299