sh_eth.c revision ae70644df780c0e87f1705fda932e7cb1bdb2074
/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2009 Renesas Solutions Corp.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK		| \
		 NETIF_MSG_TIMER	| \
		 NETIF_MSG_RX_ERR	| \
		 NETIF_MSG_TX_ERR)

/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
};

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp);
static void sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int cnt = 100;

	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
				EDMR);
		while (cnt > 0) {
			if (!(sh_eth_read(ndev, EDMR) & 0x3))
				break;
			mdelay(1);
			cnt--;
		}
		if (cnt <= 0)
			printk(KERN_ERR "Device reset failed\n");

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
				EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
				EDMR);
	}
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex_giga,
	.set_rate	= sh_eth_set_rate_giga,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
};

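/*
 * SH7757 carries both a fast Ether and a Gigabit Ether block; pick the
 * cpu_data that matches the register layout this port was probed with.
 */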
static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	else
		return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_reset(struct net_device *ndev)
{
	int cnt = 100;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset failed\n");

	/* Table Init */
	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};
#endif

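/* Fill in any cpu_data fields the per-SoC definitions above left at zero */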
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
}
#endif

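/*
 * Reserve skb headroom so the Rx buffer starts at the alignment the
 * CPU's DMA requires; the needed reserve differs between SH4 and
 * SH2/SH3 parts.
 */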
#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/*
 * Get the MAC address from the SuperH MAC address registers.
 *
 * SuperH's Ethernet device has no ROM for the MAC address; the driver
 * reuses whatever address the bootloader (U-Boot or sh-ipl+g) left in
 * MAHR/MALR.  To use this device, the bootloader must set a MAC
 * address first.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, 6);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
	if (mdp->reg_offset == sh_eth_offset_gigabit)
		return 1;
	else
		return 0;
}

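/* The transmit-request value written to EDTRR differs on GETHER cores */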
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

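/*
 * MDIO bitbang glue.  The MII management lines are driven through the
 * PIR register: one bit mask each for the clock (MDC), the data
 * direction (MMD), data out (MDO) and data in (MDI).
 */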
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
}

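/*
 * Descriptor ownership: setting RD_RACT/TD_TACT in a descriptor's
 * status word hands it to the EDMAC; RD_RDEL/TD_TDLE mark the last
 * entry so the controller wraps back to the head of the ring.
 */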
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		skb->dev = ndev; /* Mark as being used by this device. */
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer length must be a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
			GFP_KERNEL);

	if (!mdp->rx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
			rx_ringsize);
		ret = -ENOMEM;
		/* no DMA memory allocated yet, only the skb rings to undo */
		goto skb_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
			GFP_KERNEL);
	if (!mdp->tx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
			tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);

	return ret;
}

static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* mask all sh_eth interrupts */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, RFLR_VALUE, RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	netif_start_queue(ndev);

	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer length must be a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
		sh_eth_write(ndev, EDRRR_R, EDRRR);

	return 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end, unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
			sh_eth_write(ndev, EDRRR_R, EDRRR);
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

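/* Top-half interrupt handler: ack EESR and dispatch Rx, Tx and error work */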
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv */
			EESR_RMAF | /* Multicast address recv */
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv */
			EESR_RTSF | /* Short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)) { /* Recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

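/* Periodic timer: the body does nothing beyond re-arming itself every 10 s */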
static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try to connect to the PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				0, mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
			struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
					sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings		= sh_eth_get_settings,
	.set_settings		= sh_eth_set_settings,
	.nway_reset		= sh_eth_nway_reset,
	.get_msglevel		= sh_eth_get_msglevel,
	.set_msglevel		= sh_eth_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sh_eth_get_strings,
	.get_ethtool_stats	= sh_eth_get_ethtool_stats,
	.get_sset_count		= sh_eth_get_sset_count,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	/* Set the timer to check for link beat. */
	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp))
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
			" resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	mdp->stats.tx_errors++;

	/* timer off */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	/* timer on */
	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
	add_timer(&mdp->timer);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffers */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	mdp->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &mdp->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
				ECMR_PRM, ECMR);
	} else {
		/* Normal, unicast/broadcast-only mode. */
		sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
				ECMR_MCT, ECMR);
	}
}
#endif /* SH_ETH_HAS_TSU */

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free interrupts memory */
	kfree(bus->irq);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;	/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}

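/* Map the register layout id from platform data to an offset table */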
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

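/*
 * The driver binds to a platform device registered by board code.  A
 * minimal, hypothetical sketch of such board code follows; the base
 * address, IRQ number and PHY id are invented for illustration and must
 * come from the actual SoC datasheet and board wiring:
 *
 *	static struct sh_eth_plat_data my_eth_pd = {
 *		.phy		= 0x1f,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *	};
 *
 *	static struct resource my_eth_resources[] = {
 *		{
 *			.start	= 0xfee00000,
 *			.end	= 0xfee005ff,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 57,	(invented IRQ)
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device my_eth_device = {
 *		.name		= "sh_eth",	(must match CARDNAME)
 *		.id		= 0,
 *		.resource	= my_eth_resources,
 *		.num_resources	= ARRAY_SIZE(my_eth_resources),
 *		.dev		= {
 *			.platform_data = &my_eth_pd,
 *		},
 *	};
 *
 * platform_device_register(&my_eth_device) then triggers this probe.
 */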
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate device.\n");
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->addr = ioremap(res->start, resource_size(res));
	if (mdp->addr == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap failed.\n");
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* First device only init */
	if (!devno) {
		if (mdp->cd->tsu) {
			struct resource *rtsu;
			rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
			if (!rtsu) {
				dev_err(&pdev->dev, "TSU resource not found\n");
				ret = -ENODEV;
				goto out_release;
			}
			mdp->tsu_addr = ioremap(rtsu->start,
						resource_size(rtsu));
		}
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (mdp && mdp->addr)
		iounmap(mdp->addr);
	if (mdp && mdp->tsu_addr)
		iounmap(mdp->tsu_addr);
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

1912
1913static int sh_eth_drv_remove(struct platform_device *pdev)
1914{
1915	struct net_device *ndev = platform_get_drvdata(pdev);
1916	struct sh_eth_private *mdp = netdev_priv(ndev);
1917
1918	iounmap(mdp->tsu_addr);
1919	sh_mdio_release(ndev);
1920	unregister_netdev(ndev);
1921	pm_runtime_disable(&pdev->dev);
1922	iounmap(mdp->addr);
1923	free_netdev(ndev);
1924	platform_set_drvdata(pdev, NULL);
1925
1926	return 0;
1927}
1928
1929static int sh_eth_runtime_nop(struct device *dev)
1930{
1931	/*
1932	 * Runtime PM callback shared between ->runtime_suspend()
1933	 * and ->runtime_resume(). Simply returns success.
1934	 *
1935	 * This driver re-initializes all registers after
1936	 * pm_runtime_get_sync() anyway so there is no need
1937	 * to save and restore registers here.
1938	 */
1939	return 0;
1940}
1941
1942static struct dev_pm_ops sh_eth_dev_pm_ops = {
1943	.runtime_suspend = sh_eth_runtime_nop,
1944	.runtime_resume = sh_eth_runtime_nop,
1945};
1946
1947static struct platform_driver sh_eth_driver = {
1948	.probe = sh_eth_drv_probe,
1949	.remove = sh_eth_drv_remove,
1950	.driver = {
1951		   .name = CARDNAME,
1952		   .pm = &sh_eth_dev_pm_ops,
1953	},
1954};
1955
1956static int __init sh_eth_init(void)
1957{
1958	return platform_driver_register(&sh_eth_driver);
1959}
1960
1961static void __exit sh_eth_cleanup(void)
1962{
1963	platform_driver_unregister(&sh_eth_driver);
1964}
1965
1966module_init(sh_eth_init);
1967module_exit(sh_eth_cleanup);
1968
1969MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
1970MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
1971MODULE_LICENSE("GPL v2");
1972