1/*  Silan SC92031 PCI Fast Ethernet Adapter driver
2 *
3 *  Based on vendor drivers:
4 *  Silan Fast Ethernet Netcard Driver:
5 *    MODULE_AUTHOR ("gaoyonghong");
6 *    MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
7 *    MODULE_LICENSE("GPL");
8 *  8139D Fast Ethernet driver:
9 *    (C) 2002 by gaoyonghong
10 *    MODULE_AUTHOR ("gaoyonghong");
11 *    MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
12 *    MODULE_LICENSE("GPL");
13 *  Both are almost identical and seem to be based on pci-skeleton.c
14 *
15 *  Rewritten for 2.6 by Cesar Eduardo Barros
16 *
17 *  A datasheet for this chip can be found at
18 *  http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf
19 */
20
21/* Note about set_mac_address: I don't know how to change the hardware
22 * matching, so you need to enable IFF_PROMISC when using it.
23 */
24
25#include <linux/interrupt.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/delay.h>
29#include <linux/pci.h>
30#include <linux/dma-mapping.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/crc32.h>
36
37#include <asm/irq.h>
38
#define SC92031_NAME "sc92031"

/* BAR 0 is MMIO, BAR 1 is PIO */
#ifndef SC92031_USE_BAR
#define SC92031_USE_BAR 0
#endif

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 64;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(multicast_filter_limit,
	"Maximum number of filtered multicast addresses");

/* Forced media selection; 0 (the default) lets the PHY autonegotiate.
 * See _sc92031_phy_reset for how this maps onto PhyCtrl bits. */
static int media;
module_param(media, int, 0);
MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
	" 0x01 = 10M half, 0x02 = 10M full,"
	" 0x04 = 100M half, 0x08 = 100M full)");

/* Size of the in-memory receive ring. */
#define  RX_BUF_LEN_IDX  3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/
#define  RX_BUF_LEN	(8192 << RX_BUF_LEN_IDX)

/* Number of Tx descriptor registers. */
#define  NUM_TX_DESC	   4

/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
#define  MAX_ETH_FRAME_SIZE	  1536

/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define  TX_BUF_SIZE       MAX_ETH_FRAME_SIZE
#define  TX_BUF_TOT_LEN    (TX_BUF_SIZE * NUM_TX_DESC)

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define  RX_FIFO_THRESH    7     /* Rx buffer level before first PCI xfer.  */

/* Time in jiffies before concluding the transmitter is hung. */
#define  TX_TIMEOUT     (4*HZ)

#define  SILAN_STATS_NUM    2    /* number of ETHTOOL_GSTATS */

/* media options (values for the "media" module parameter) */
#define  AUTOSELECT    0x00
#define  M10_HALF      0x01
#define  M10_FULL      0x02
#define  M100_HALF     0x04
#define  M100_FULL     0x08
86
 /* Symbolic offsets to registers (byte offsets from the mapped BAR). */
enum  silan_registers {
   Config0    = 0x00,         // Config0
   Config1    = 0x04,         // Config1
   RxBufWPtr  = 0x08,         // Rx buffer write pointer
   IntrStatus = 0x0C,         // Interrupt status
   IntrMask   = 0x10,         // Interrupt mask
   RxbufAddr  = 0x14,         // Rx buffer start address
   RxBufRPtr  = 0x18,         // Rx buffer read pointer
   Txstatusall = 0x1C,        // Transmit status of all descriptors
   TxStatus0  = 0x20,	      // Transmit status (Four 32bit registers).
   TxAddr0    = 0x30,         // Tx descriptors (also four 32bit).
   RxConfig   = 0x40,         // Rx configuration
   MAC0	      = 0x44,	      // Ethernet hardware address.
   MAR0	      = 0x4C,	      // Multicast filter.
   RxStatus0  = 0x54,         // Rx status
   TxConfig   = 0x5C,         // Tx configuration
   PhyCtrl    = 0x60,         // physical control
   FlowCtrlConfig = 0x64,     // flow control
   Miicmd0    = 0x68,         // Mii command0 register
   Miicmd1    = 0x6C,         // Mii command1 register
   Miistatus  = 0x70,         // Mii status register
   Timercnt   = 0x74,         // Timer counter register
   TimerIntr  = 0x78,         // Timer interrupt register
   PMConfig   = 0x7C,         // Power Manager configuration
   CRC0       = 0x80,         // Power Manager CRC (two 32bit registers)
   Wakeup0    = 0x88,         // Power Manager wakeup (eight 64bit registers)
   LSBCRC0    = 0xC8,         // Power Manager LSBCRC (two 32bit registers)
   TestD0     = 0xD0,
   TestD4     = 0xD4,
   TestD8     = 0xD8,
};
119
/* Vendor-specific PHY registers, beyond the standard MII_* set. */
#define MII_JAB             16
#define MII_OutputStatus    24

/* Bits in the MII_JAB PHY register. */
#define PHY_16_JAB_ENB      0x1000
#define PHY_16_PORT_ENB     0x1

/* Bits in IntrStatus / IntrMask. */
enum IntrStatusBits {
   LinkFail       = 0x80000000,
   LinkOK         = 0x40000000,
   TimeOut        = 0x20000000,
   RxOverflow     = 0x0040,
   RxOK           = 0x0020,
   TxOK           = 0x0001,
   IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
};

/* Bits in the per-descriptor TxStatus registers. */
enum TxStatusBits {
   TxCarrierLost = 0x20000000,
   TxAborted     = 0x10000000,
   TxOutOfWindow = 0x08000000,
   TxNccShift    = 22,
   EarlyTxThresShift = 16,
   TxStatOK      = 0x8000,
   TxUnderrun    = 0x4000,
   TxOwn         = 0x2000,
};

/* Bits in the per-packet rx status word stored in the rx ring
 * (the packet size occupies the top 12 bits -- see _sc92031_rx_tasklet). */
enum RxStatusBits {
   RxStatesOK   = 0x80000,
   RxBadAlign   = 0x40000,
   RxHugeFrame  = 0x20000,
   RxSmallFrame = 0x10000,
   RxCRCOK      = 0x8000,
   RxCrlFrame   = 0x4000,
   Rx_Broadcast = 0x2000,
   Rx_Multicast = 0x1000,
   RxAddrMatch  = 0x0800,
   MiiErr       = 0x0400,
};
159
/* RxConfig register bits. */
enum RxConfigBits {
   RxFullDx    = 0x80000000,
   RxEnb       = 0x40000000,
   RxSmall     = 0x20000000,
   RxHuge      = 0x10000000,
   RxErr       = 0x08000000,
   RxAllphys   = 0x04000000,
   RxMulticast = 0x02000000,
   RxBroadcast = 0x01000000,
   RxLoopBack  = (1 << 23) | (1 << 22),
   LowThresholdShift  = 12,
   HighThresholdShift = 2,
};

/* TxConfig register bits. */
enum TxConfigBits {
   TxFullDx       = 0x80000000,
   TxEnb          = 0x40000000,
   TxEnbPad       = 0x20000000,
   TxEnbHuge      = 0x10000000,
   TxEnbFCS       = 0x08000000,
   TxNoBackOff    = 0x04000000,
   TxEnbPrem      = 0x02000000,
   TxCareLostCrs  = 0x1000000,
   TxExdCollNum   = 0xf00000,
   TxDataRate     = 0x80000,
};

/* PhyCtrl register bits. */
enum PhyCtrlconfigbits {
   PhyCtrlAne         = 0x80000000,
   PhyCtrlSpd100      = 0x40000000,
   PhyCtrlSpd10       = 0x20000000,
   PhyCtrlPhyBaseAddr = 0x1f000000,
   PhyCtrlDux         = 0x800000,
   PhyCtrlReset       = 0x400000,
};

/* FlowCtrlConfig register bits. */
enum FlowCtrlConfigBits {
   FlowCtrlFullDX = 0x80000000,
   FlowCtrlEnb    = 0x40000000,
};

/* Config0 register bits. */
enum Config0Bits {
   Cfg0_Reset  = 0x80000000,
   Cfg0_Anaoff = 0x40000000,
   Cfg0_LDPS   = 0x20000000,
};

/* Config1 register bits. */
enum Config1Bits {
   Cfg1_EarlyRx = 1 << 31,
   Cfg1_EarlyTx = 1 << 30,

   //rx buffer size
   Cfg1_Rcv8K   = 0x0,
   Cfg1_Rcv16K  = 0x1,
   Cfg1_Rcv32K  = 0x3,
   Cfg1_Rcv64K  = 0x7,
   Cfg1_Rcv128K = 0xf,
};

/* Miicmd0 register bits. */
enum MiiCmd0Bits {
   Mii_Divider = 0x20000000,
   Mii_WRITE   = 0x400000,
   Mii_READ    = 0x200000,
   Mii_SCAN    = 0x100000,
   Mii_Tamod   = 0x80000,
   Mii_Drvmod  = 0x40000,
   Mii_mdc     = 0x20000,
   Mii_mdoen   = 0x10000,
   Mii_mdo     = 0x8000,
   Mii_mdi     = 0x4000,
};

/* Miistatus register bits. */
enum MiiStatusBits {
    Mii_StatusBusy = 0x80000000,
};

/* PMConfig register bits. */
enum PMConfigBits {
   PM_Enable  = 1 << 31,
   PM_LongWF  = 1 << 30,
   PM_Magic   = 1 << 29,
   PM_LANWake = 1 << 28,
   PM_LWPTN   = (1 << 27 | 1<< 26),
   PM_LinkUp  = 1 << 25,
   PM_WakeUp  = 1 << 24,
};
245
246/* Locking rules:
247 * priv->lock protects most of the fields of priv and most of the
248 * hardware registers. It does not have to protect against softirqs
249 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
250 * it also does not need to be used in ->open and ->stop while the
251 * device interrupts are off.
252 * Not having to protect against softirqs is very useful due to heavy
253 * use of mdelay() at _sc92031_reset.
254 * Functions prefixed with _sc92031_ must be called with the lock held;
255 * functions prefixed with sc92031_ must be called without the lock held.
256 * Use mmiowb() before unlocking if the hardware was written to.
257 */
258
259/* Locking rules for the interrupt:
260 * - the interrupt and the tasklet never run at the same time
261 * - neither run between sc92031_disable_interrupts and
 *   sc92031_enable_interrupts
263 */
264
/* Per-device private state, stored in netdev_priv(dev).
 * See the locking rules comment above for what priv->lock protects. */
struct sc92031_priv {
	spinlock_t		lock;
	/* iomap.h cookie */
	void __iomem		*port_base;
	/* pci device structure */
	struct pci_dev		*pdev;
	/* tasklet */
	struct tasklet_struct	tasklet;

	/* CPU address of rx ring */
	void			*rx_ring;
	/* PCI address of rx ring */
	dma_addr_t		rx_ring_dma_addr;
	/* PCI address of rx ring read pointer */
	dma_addr_t		rx_ring_tail;

	/* tx ring write index */
	unsigned		tx_head;
	/* tx ring read index */
	unsigned		tx_tail;
	/* CPU address of tx bounce buffer */
	void			*tx_bufs;
	/* PCI address of tx bounce buffer */
	dma_addr_t		tx_bufs_dma_addr;

	/* copies of some hardware registers */
	u32			intr_status;
	/* mask the tasklet writes back to IntrMask; 0 while interrupts
	 * are administratively disabled */
	atomic_t		intr_mask;
	u32			rx_config;
	u32			tx_config;
	u32			pm_config;

	/* copy of some flags from dev->flags */
	unsigned int		mc_flags;

	/* for ETHTOOL_GSTATS */
	u64			tx_timeouts;
	u64			rx_loss;

	/* for dev->get_stats */
	long			rx_value;
};
307
308/* I don't know which registers can be safely read; however, I can guess
309 * MAC0 is one of them. */
310static inline void _sc92031_dummy_read(void __iomem *port_base)
311{
312	ioread32(port_base + MAC0);
313}
314
315static u32 _sc92031_mii_wait(void __iomem *port_base)
316{
317	u32 mii_status;
318
319	do {
320		udelay(10);
321		mii_status = ioread32(port_base + Miistatus);
322	} while (mii_status & Mii_StatusBusy);
323
324	return mii_status;
325}
326
327static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
328{
329	iowrite32(Mii_Divider, port_base + Miicmd0);
330
331	_sc92031_mii_wait(port_base);
332
333	iowrite32(cmd1, port_base + Miicmd1);
334	iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);
335
336	return _sc92031_mii_wait(port_base);
337}
338
/* Put the MII engine into scan mode.  The operand 0x1 << 6 uses the same
 * register-address encoding as _sc92031_mii_read, i.e. it appears to
 * select PHY register 1 -- NOTE(review): confirm against the datasheet. */
static void _sc92031_mii_scan(void __iomem *port_base)
{
	_sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
}
343
/* Read a PHY register: the register address goes into bits 10:6 of the
 * operand; the 16-bit result sits 13 bits up in the returned status. */
static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
{
	return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
}
348
/* Write a PHY register: address in bits 10:6, data starting at bit 11. */
static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
{
	_sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
}
353
/* Mask chip interrupts and wait until neither the hard IRQ handler nor
 * the tasklet can still be running.  Call without priv->lock held. */
static void sc92031_disable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* tell the tasklet/interrupt not to enable interrupts */
	atomic_set(&priv->intr_mask, 0);
	wmb();

	/* stop interrupts */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);	/* flush the posted write */
	mmiowb();

	/* wait for any concurrent interrupt/tasklet to finish */
	synchronize_irq(dev->irq);
	tasklet_disable(&priv->tasklet);
}
372
/* Re-enable the tasklet and unmask chip interrupts.
 * Counterpart of sc92031_disable_interrupts. */
static void sc92031_enable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	tasklet_enable(&priv->tasklet);

	/* publish the software mask copy before unmasking the hardware */
	atomic_set(&priv->intr_mask, IntrBits);
	wmb();

	iowrite32(IntrBits, port_base + IntrMask);
	mmiowb();
}
386
387static void _sc92031_disable_tx_rx(struct net_device *dev)
388{
389	struct sc92031_priv *priv = netdev_priv(dev);
390	void __iomem *port_base = priv->port_base;
391
392	priv->rx_config &= ~RxEnb;
393	priv->tx_config &= ~TxEnb;
394	iowrite32(priv->rx_config, port_base + RxConfig);
395	iowrite32(priv->tx_config, port_base + TxConfig);
396}
397
398static void _sc92031_enable_tx_rx(struct net_device *dev)
399{
400	struct sc92031_priv *priv = netdev_priv(dev);
401	void __iomem *port_base = priv->port_base;
402
403	priv->rx_config |= RxEnb;
404	priv->tx_config |= TxEnb;
405	iowrite32(priv->rx_config, port_base + RxConfig);
406	iowrite32(priv->tx_config, port_base + TxConfig);
407}
408
409static void _sc92031_tx_clear(struct net_device *dev)
410{
411	struct sc92031_priv *priv = netdev_priv(dev);
412
413	while (priv->tx_head - priv->tx_tail > 0) {
414		priv->tx_tail++;
415		dev->stats.tx_dropped++;
416	}
417	priv->tx_head = priv->tx_tail = 0;
418}
419
420static void _sc92031_set_mar(struct net_device *dev)
421{
422	struct sc92031_priv *priv = netdev_priv(dev);
423	void __iomem *port_base = priv->port_base;
424	u32 mar0 = 0, mar1 = 0;
425
426	if ((dev->flags & IFF_PROMISC) ||
427	    netdev_mc_count(dev) > multicast_filter_limit ||
428	    (dev->flags & IFF_ALLMULTI))
429		mar0 = mar1 = 0xffffffff;
430	else if (dev->flags & IFF_MULTICAST) {
431		struct netdev_hw_addr *ha;
432
433		netdev_for_each_mc_addr(ha, dev) {
434			u32 crc;
435			unsigned bit = 0;
436
437			crc = ~ether_crc(ETH_ALEN, ha->addr);
438			crc >>= 24;
439
440			if (crc & 0x01)	bit |= 0x02;
441			if (crc & 0x02)	bit |= 0x01;
442			if (crc & 0x10)	bit |= 0x20;
443			if (crc & 0x20)	bit |= 0x10;
444			if (crc & 0x40)	bit |= 0x08;
445			if (crc & 0x80)	bit |= 0x04;
446
447			if (bit > 31)
448				mar0 |= 0x1 << (bit - 32);
449			else
450				mar1 |= 0x1 << bit;
451		}
452	}
453
454	iowrite32(mar0, port_base + MAR0);
455	iowrite32(mar1, port_base + MAR0 + 4);
456}
457
458static void _sc92031_set_rx_config(struct net_device *dev)
459{
460	struct sc92031_priv *priv = netdev_priv(dev);
461	void __iomem *port_base = priv->port_base;
462	unsigned int old_mc_flags;
463	u32 rx_config_bits = 0;
464
465	old_mc_flags = priv->mc_flags;
466
467	if (dev->flags & IFF_PROMISC)
468		rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
469				| RxMulticast | RxAllphys;
470
471	if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
472		rx_config_bits |= RxMulticast;
473
474	if (dev->flags & IFF_BROADCAST)
475		rx_config_bits |= RxBroadcast;
476
477	priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
478			| RxMulticast | RxAllphys);
479	priv->rx_config |= rx_config_bits;
480
481	priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
482			| IFF_MULTICAST | IFF_BROADCAST);
483
484	if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
485		iowrite32(priv->rx_config, port_base + RxConfig);
486}
487
/* Read PHY link state and, on link-up, reprogram the MAC Tx/Rx and
 * flow-control configuration for the negotiated speed/duplex.
 * Returns true when the link is up.  Must be called with priv->lock held. */
static bool _sc92031_check_media(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmsr;

	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
	rmb();
	if (bmsr & BMSR_LSTATUS) {
		bool speed_100, duplex_full;
		u32 flow_ctrl_config = 0;
		/* vendor PHY status register: bit 1 = 100Mbps,
		 * bit 2 = full duplex (decoded just below) */
		u16 output_status = _sc92031_mii_read(port_base,
				MII_OutputStatus);
		_sc92031_mii_scan(port_base);

		speed_100 = output_status & 0x2;
		duplex_full = output_status & 0x4;

		/* Initial Tx/Rx configuration */
		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
		priv->tx_config = 0x48800000;

		/* NOTE: vendor driver had dead code here to enable tx padding */

		if (!speed_100)
			priv->tx_config |= 0x80000;

		// configure rx mode
		_sc92031_set_rx_config(dev);

		if (duplex_full) {
			priv->rx_config |= RxFullDx;
			priv->tx_config |= TxFullDx;
			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
		} else {
			priv->rx_config &= ~RxFullDx;
			priv->tx_config &= ~TxFullDx;
		}

		_sc92031_set_mar(dev);
		_sc92031_set_rx_config(dev);
		_sc92031_enable_tx_rx(dev);
		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);

		netif_carrier_on(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
				dev->name,
				speed_100 ? "100" : "10",
				duplex_full ? "full" : "half");
		return true;
	} else {
		_sc92031_mii_scan(port_base);

		netif_carrier_off(dev);

		_sc92031_disable_tx_rx(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link down\n", dev->name);
		return false;
	}
}
552
553static void _sc92031_phy_reset(struct net_device *dev)
554{
555	struct sc92031_priv *priv = netdev_priv(dev);
556	void __iomem *port_base = priv->port_base;
557	u32 phy_ctrl;
558
559	phy_ctrl = ioread32(port_base + PhyCtrl);
560	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
561	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;
562
563	switch (media) {
564	default:
565	case AUTOSELECT:
566		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
567		break;
568	case M10_HALF:
569		phy_ctrl |= PhyCtrlSpd10;
570		break;
571	case M10_FULL:
572		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
573		break;
574	case M100_HALF:
575		phy_ctrl |= PhyCtrlSpd100;
576		break;
577	case M100_FULL:
578		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
579		break;
580	}
581
582	iowrite32(phy_ctrl, port_base + PhyCtrl);
583	mdelay(10);
584
585	phy_ctrl &= ~PhyCtrlReset;
586	iowrite32(phy_ctrl, port_base + PhyCtrl);
587	mdelay(1);
588
589	_sc92031_mii_write(port_base, MII_JAB,
590			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
591	_sc92031_mii_scan(port_base);
592
593	netif_carrier_off(dev);
594	netif_stop_queue(dev);
595}
596
/* Full chip reset and re-initialization: soft-reset the MAC, reprogram
 * the rings, reset the PHY and re-check the link.  Must be called with
 * priv->lock held and device interrupts disabled -- it uses long
 * mdelay()s (see the locking rules comment above). */
static void _sc92031_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* disable PM */
	iowrite32(0, port_base + PMConfig);

	/* soft reset the chip */
	iowrite32(Cfg0_Reset, port_base + Config0);
	mdelay(200);

	iowrite32(0, port_base + Config0);
	mdelay(10);

	/* disable interrupts */
	iowrite32(0, port_base + IntrMask);

	/* clear multicast address */
	iowrite32(0, port_base + MAR0);
	iowrite32(0, port_base + MAR0 + 4);

	/* init rx ring */
	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
	priv->rx_ring_tail = priv->rx_ring_dma_addr;

	/* init tx ring */
	_sc92031_tx_clear(dev);

	/* clear old register values */
	priv->intr_status = 0;
	atomic_set(&priv->intr_mask, 0);
	priv->rx_config = 0;
	priv->tx_config = 0;
	priv->mc_flags = 0;

	/* configure rx buffer size (must match RX_BUF_LEN_IDX above) */
	/* NOTE: vendor driver had dead code here to enable early tx/rx */
	iowrite32(Cfg1_Rcv64K, port_base + Config1);

	_sc92031_phy_reset(dev);
	_sc92031_check_media(dev);

	/* calculate rx fifo overflow */
	priv->rx_value = 0;

	/* enable PM */
	iowrite32(priv->pm_config, port_base + PMConfig);

	/* clear intr register by reading it */
	ioread32(port_base + IntrStatus);
}
649
650static void _sc92031_tx_tasklet(struct net_device *dev)
651{
652	struct sc92031_priv *priv = netdev_priv(dev);
653	void __iomem *port_base = priv->port_base;
654
655	unsigned old_tx_tail;
656	unsigned entry;
657	u32 tx_status;
658
659	old_tx_tail = priv->tx_tail;
660	while (priv->tx_head - priv->tx_tail > 0) {
661		entry = priv->tx_tail % NUM_TX_DESC;
662		tx_status = ioread32(port_base + TxStatus0 + entry * 4);
663
664		if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
665			break;
666
667		priv->tx_tail++;
668
669		if (tx_status & TxStatOK) {
670			dev->stats.tx_bytes += tx_status & 0x1fff;
671			dev->stats.tx_packets++;
672			/* Note: TxCarrierLost is always asserted at 100mbps. */
673			dev->stats.collisions += (tx_status >> 22) & 0xf;
674		}
675
676		if (tx_status & (TxOutOfWindow | TxAborted)) {
677			dev->stats.tx_errors++;
678
679			if (tx_status & TxAborted)
680				dev->stats.tx_aborted_errors++;
681
682			if (tx_status & TxCarrierLost)
683				dev->stats.tx_carrier_errors++;
684
685			if (tx_status & TxOutOfWindow)
686				dev->stats.tx_window_errors++;
687		}
688
689		if (tx_status & TxUnderrun)
690			dev->stats.tx_fifo_errors++;
691	}
692
693	if (priv->tx_tail != old_tx_tail)
694		if (netif_queue_stopped(dev))
695			netif_wake_queue(dev);
696}
697
698static void _sc92031_rx_tasklet_error(struct net_device *dev,
699				      u32 rx_status, unsigned rx_size)
700{
701	if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
702		dev->stats.rx_errors++;
703		dev->stats.rx_length_errors++;
704	}
705
706	if (!(rx_status & RxStatesOK)) {
707		dev->stats.rx_errors++;
708
709		if (rx_status & (RxHugeFrame | RxSmallFrame))
710			dev->stats.rx_length_errors++;
711
712		if (rx_status & RxBadAlign)
713			dev->stats.rx_frame_errors++;
714
715		if (!(rx_status & RxCRCOK))
716			dev->stats.rx_crc_errors++;
717	} else {
718		struct sc92031_priv *priv = netdev_priv(dev);
719		priv->rx_loss++;
720	}
721}
722
/* Drain completed packets from the hardware rx ring and feed them to the
 * network stack.  Must be called with priv->lock held (from the tasklet). */
static void _sc92031_rx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	dma_addr_t rx_ring_head;
	unsigned rx_len;
	unsigned rx_ring_offset;
	void *rx_ring = priv->rx_ring;

	rx_ring_head = ioread32(port_base + RxBufWPtr);
	rmb();

	/* rx_ring_head is only 17 bits in the RxBufWPtr register.
	 * we need to change it to 32 bits physical address
	 */
	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
	if (rx_ring_head < priv->rx_ring_dma_addr)
		rx_ring_head += RX_BUF_LEN;

	/* Bytes pending between our read pointer and the chip's write
	 * pointer, accounting for ring wrap-around. */
	if (rx_ring_head >= priv->rx_ring_tail)
		rx_len = rx_ring_head - priv->rx_ring_tail;
	else
		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);

	if (!rx_len)
		return;

	if (unlikely(rx_len > RX_BUF_LEN)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s: rx packets length > rx buffer\n",
					dev->name);
		return;
	}

	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;

	while (rx_len) {
		u32 rx_status;
		unsigned rx_size, rx_size_align, pkt_size;
		struct sk_buff *skb;

		/* Each packet is preceded by a little-endian 32-bit status
		 * word: size in the top 12 bits, flag bits below. */
		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
		rmb();

		rx_size = rx_status >> 20;
		rx_size_align = (rx_size + 3) & ~3;	// for 4 bytes aligned
		pkt_size = rx_size - 4;	// Omit the four octet CRC from the length.

		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;

		if (unlikely(rx_status == 0 ||
			     rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
			     rx_size < 16 ||
			     !(rx_status & RxStatesOK))) {
			_sc92031_rx_tasklet_error(dev, rx_status, rx_size);
			break;
		}

		if (unlikely(rx_size_align + 4 > rx_len)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
			break;
		}

		rx_len -= rx_size_align + 4;

		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
		if (unlikely(!skb)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
						dev->name, pkt_size);
			goto next;
		}

		/* Copy out of the ring, in two pieces if the packet wraps. */
		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
		} else {
			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
		}

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		dev->stats.rx_bytes += pkt_size;
		dev->stats.rx_packets++;

		if (rx_status & Rx_Multicast)
			dev->stats.multicast++;

	next:
		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
	}
	mb();

	/* Tell the chip how far we have consumed. */
	priv->rx_ring_tail = rx_ring_head;
	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
}
825
826static void _sc92031_link_tasklet(struct net_device *dev)
827{
828	if (_sc92031_check_media(dev))
829		netif_wake_queue(dev);
830	else {
831		netif_stop_queue(dev);
832		dev->stats.tx_carrier_errors++;
833	}
834}
835
/* Bottom half: handle the interrupt causes latched in priv->intr_status
 * by sc92031_interrupt, then restore the interrupt mask (the hard IRQ
 * handler cleared it before scheduling us). */
static void sc92031_tasklet(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	intr_status = priv->intr_status;

	spin_lock(&priv->lock);

	if (unlikely(!netif_running(dev)))
		goto out;

	if (intr_status & TxOK)
		_sc92031_tx_tasklet(dev);

	if (intr_status & RxOK)
		_sc92031_rx_tasklet(dev);

	if (intr_status & RxOverflow)
		dev->stats.rx_errors++;

	if (intr_status & TimeOut) {
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (intr_status & (LinkFail | LinkOK))
		_sc92031_link_tasklet(dev);

out:
	/* re-enable interrupts; reads 0 if sc92031_disable_interrupts ran
	 * in the meantime, keeping them masked */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	spin_unlock(&priv->lock);
}
876
/* Hard IRQ handler: mask the chip, read-and-clear IntrStatus, and defer
 * all real work to the tasklet, which also restores the mask.  If the
 * interrupt was not ours, restore the mask here instead. */
static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	/* mask interrupts before clearing IntrStatus */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);

	intr_status = ioread32(port_base + IntrStatus);
	if (unlikely(intr_status == 0xffffffff))
		return IRQ_NONE;	// hardware has gone missing

	intr_status &= IntrBits;
	if (!intr_status)
		goto out_none;

	/* latch the causes for the tasklet; mask stays cleared until the
	 * tasklet has processed them */
	priv->intr_status = intr_status;
	tasklet_schedule(&priv->tasklet);

	return IRQ_HANDLED;

out_none:
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	return IRQ_NONE;
}
910
911static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
912{
913	struct sc92031_priv *priv = netdev_priv(dev);
914	void __iomem *port_base = priv->port_base;
915
916	// FIXME I do not understand what is this trying to do.
917	if (netif_running(dev)) {
918		int temp;
919
920		spin_lock_bh(&priv->lock);
921
922		/* Update the error count. */
923		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;
924
925		if (temp == 0xffff) {
926			priv->rx_value += temp;
927			dev->stats.rx_fifo_errors = priv->rx_value;
928		} else
929			dev->stats.rx_fifo_errors = temp + priv->rx_value;
930
931		spin_unlock_bh(&priv->lock);
932	}
933
934	return &dev->stats;
935}
936
/* ndo_start_xmit: copy the skb into a per-descriptor bounce buffer and
 * kick the hardware.  Always consumes the skb and returns NETDEV_TX_OK
 * (oversized or carrier-less frames are silently dropped and counted). */
static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned len;
	unsigned entry;
	u32 tx_status;

	if (unlikely(skb->len > TX_BUF_SIZE)) {
		dev->stats.tx_dropped++;
		goto out;
	}

	/* NOTE(review): plain spin_lock (no _bh/_irqsave); relies on the
	 * other lock users being softirq/process context only -- confirm
	 * before changing the locking scheme. */
	spin_lock(&priv->lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);

	entry = priv->tx_head++ % NUM_TX_DESC;

	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);

	/* Zero-pad undersized frames to the minimum Ethernet length. */
	len = skb->len;
	if (len < ETH_ZLEN) {
		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
				0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}

	wmb();

	/* Status word: frame length in the low bits, plus a length-based
	 * early-tx threshold in the EarlyTxThresShift field. */
	if (len < 100)
		tx_status = len;
	else if (len < 300)
		tx_status = 0x30000 | len;
	else
		tx_status = 0x50000 | len;

	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
			port_base + TxAddr0 + entry * 4);
	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
	mmiowb();

	/* Ring full: stop the queue until the tasklet reclaims a slot. */
	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock(&priv->lock);

out:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
996
/* ndo_open: allocate the rx ring and tx bounce buffers, hook the IRQ,
 * reset the chip, and enable interrupts.  Unwinds with gotos on failure. */
static int sc92031_open(struct net_device *dev)
{
	int err;
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
			&priv->rx_ring_dma_addr);
	if (unlikely(!priv->rx_ring)) {
		err = -ENOMEM;
		goto out_alloc_rx_ring;
	}

	priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
			&priv->tx_bufs_dma_addr);
	if (unlikely(!priv->tx_bufs)) {
		err = -ENOMEM;
		goto out_alloc_tx_bufs;
	}
	priv->tx_head = priv->tx_tail = 0;

	err = request_irq(pdev->irq, sc92031_interrupt,
			IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0))
		goto out_request_irq;

	priv->pm_config = 0;

	/* Interrupts already disabled by sc92031_stop or sc92031_probe */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	/* Only start transmitting if the reset brought the link up. */
	if (netif_carrier_ok(dev))
		netif_start_queue(dev);
	else
		netif_tx_disable(dev);

	return 0;

out_request_irq:
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);
out_alloc_rx_ring:
	return err;
}
1050
/* ndo_stop: quiesce the hardware, then release the IRQ and DMA memory --
 * the reverse of the setup performed in sc92031_open. */
static int sc92031_stop(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	netif_tx_disable(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	free_irq(pdev->irq, dev);
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);

	return 0;
}
1077
/* ndo_set_rx_mode: push the current multicast filter and rx-mode flags to
 * the chip under the lock. */
static void sc92031_set_multicast_list(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_set_mar(dev);
	_sc92031_set_rx_config(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
}
1090
/* ndo_tx_timeout: recover a hung transmitter with a full chip reset.
 * Interrupts are masked first, so the tasklet cannot run concurrently. */
static void sc92031_tx_timeout(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	/* Disable interrupts by clearing the interrupt mask.*/
	sc92031_disable_interrupts(dev);

	spin_lock(&priv->lock);

	/* exported via ETHTOOL_GSTATS */
	priv->tx_timeouts++;

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock(&priv->lock);

	/* enable interrupts */
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
}
1113
1114#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netpoll/netconsole: run the IRQ handler and,
 * if it claimed the interrupt, the bottom half synchronously, with the
 * IRQ line disabled. */
static void sc92031_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
		sc92031_tasklet((unsigned long)dev);
	enable_irq(dev->irq);
}
1122#endif
1123
/* ethtool get_settings: report supported/advertised link modes and the
 * current speed/duplex, derived from PhyCtrl and the vendor PHY status
 * register (same decoding as _sc92031_check_media). */
static int sc92031_ethtool_get_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u8 phy_address;
	u32 phy_ctrl;
	u16 output_status;

	spin_lock_bh(&priv->lock);

	phy_address = ioread32(port_base + Miicmd1) >> 27;
	phy_ctrl = ioread32(port_base + PhyCtrl);

	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;

	/* each "(phy_ctrl & X) == X" test requires ALL bits of X to be set */
	if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
			== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
		cmd->advertising |= ADVERTISED_Autoneg;

	if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
		cmd->advertising |= ADVERTISED_10baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
			== (PhyCtrlSpd10 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_10baseT_Full;

	if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
		cmd->advertising |= ADVERTISED_100baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
			== (PhyCtrlSpd100 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_100baseT_Full;

	if (phy_ctrl & PhyCtrlAne)
		cmd->advertising |= ADVERTISED_Autoneg;

	/* vendor PHY status: bit 1 = 100Mbps, bit 2 = full duplex */
	ethtool_cmd_speed_set(cmd,
			      (output_status & 0x2) ? SPEED_100 : SPEED_10);
	cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = phy_address;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
1181
/* ethtool set_settings: validate the requested link parameters and map
 * them onto the chip's PhyCtrl register bits.
 *
 * Returns 0 on success, -EINVAL for any parameter the hardware cannot
 * honor (only 10/100, half/full, internal PHY at address 0x1f).
 */
static int sc92031_ethtool_set_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 speed = ethtool_cmd_speed(cmd);
	u32 phy_ctrl;
	u32 old_phy_ctrl;

	if (!(speed == SPEED_10 || speed == SPEED_100))
		return -EINVAL;
	if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
		return -EINVAL;
	if (!(cmd->port == PORT_MII))
		return -EINVAL;
	/* Only the fixed internal PHY address is accepted. */
	if (!(cmd->phy_address == 0x1f))
		return -EINVAL;
	if (!(cmd->transceiver == XCVR_INTERNAL))
		return -EINVAL;
	if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg needs at least one advertised mode to work with. */
		if (!(cmd->advertising & (ADVERTISED_Autoneg
				| ADVERTISED_100baseT_Full
				| ADVERTISED_100baseT_Half
				| ADVERTISED_10baseT_Full
				| ADVERTISED_10baseT_Half)))
			return -EINVAL;

		phy_ctrl = PhyCtrlAne;

		// FIXME: I'm not sure what the original code was trying to do
		if (cmd->advertising & ADVERTISED_Autoneg)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			phy_ctrl |= PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			phy_ctrl |= PhyCtrlSpd10;
	} else {
		// FIXME: Whole branch guessed
		phy_ctrl = 0;

		if (speed == SPEED_10)
			phy_ctrl |= PhyCtrlSpd10;
		else /* cmd->speed == SPEED_100 */
			phy_ctrl |= PhyCtrlSpd100;

		if (cmd->duplex == DUPLEX_FULL)
			phy_ctrl |= PhyCtrlDux;
	}

	spin_lock_bh(&priv->lock);

	/* Preserve all PhyCtrl bits that we do not manage here, and only
	 * touch the register when something actually changed. */
	old_phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
			| PhyCtrlSpd100 | PhyCtrlSpd10);
	if (phy_ctrl != old_phy_ctrl)
		iowrite32(phy_ctrl, port_base + PhyCtrl);

	spin_unlock_bh(&priv->lock);

	return 0;
}
1250
/* ethtool get_wol: translate the chip's PMConfig register into
 * Wake-on-LAN option flags. */
static void sc92031_ethtool_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);
	pm_config = ioread32(port_base + PMConfig);
	spin_unlock_bh(&priv->lock);

	// FIXME: Guessed
	wolinfo->supported = WAKE_PHY | WAKE_MAGIC
			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
	wolinfo->wolopts = 0;

	if (pm_config & PM_LinkUp)
		wolinfo->wolopts |= WAKE_PHY;

	if (pm_config & PM_Magic)
		wolinfo->wolopts |= WAKE_MAGIC;

	/* PM_WakeUp is a single hardware bit covering all packet-based
	 * wakeups, so it maps to all three options at once. */
	if (pm_config & PM_WakeUp)
		// FIXME: Guessed
		wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
}
1277
/* ethtool set_wol: translate Wake-on-LAN option flags into the chip's
 * PMConfig register, caching the value for restore on resume.
 *
 * Returns 0.
 */
static int sc92031_ethtool_set_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);

	/* Read-modify-write: clear only the bits managed here. */
	pm_config = ioread32(port_base + PMConfig)
			& ~(PM_LinkUp | PM_Magic | PM_WakeUp);

	if (wolinfo->wolopts & WAKE_PHY)
		pm_config |= PM_LinkUp;

	if (wolinfo->wolopts & WAKE_MAGIC)
		pm_config |= PM_Magic;

	// FIXME: Guessed
	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
		pm_config |= PM_WakeUp;

	/* Cache the value so it survives chip resets. */
	priv->pm_config = pm_config;
	iowrite32(pm_config, port_base + PMConfig);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return 0;
}
1308
/* ethtool nway_reset: restart PHY autonegotiation.
 *
 * Returns 0 on success, -EINVAL if autonegotiation is not enabled in
 * the PHY's BMCR register.
 */
static int sc92031_ethtool_nway_reset(struct net_device *dev)
{
	int err = 0;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmcr;

	spin_lock_bh(&priv->lock);

	bmcr = _sc92031_mii_read(port_base, MII_BMCR);
	if (!(bmcr & BMCR_ANENABLE)) {
		err = -EINVAL;
		goto out;
	}

	_sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);

out:
	/* Restart background MII scanning on both success and error paths. */
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return err;
}
1334
/* Names of the driver-private ethtool statistics; the order must match
 * the data[] layout in sc92031_ethtool_get_ethtool_stats(). */
static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
	"tx_timeout",
	"rx_loss",
};
1339
1340static void sc92031_ethtool_get_strings(struct net_device *dev,
1341		u32 stringset, u8 *data)
1342{
1343	if (stringset == ETH_SS_STATS)
1344		memcpy(data, sc92031_ethtool_stats_strings,
1345				SILAN_STATS_NUM * ETH_GSTRING_LEN);
1346}
1347
1348static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset)
1349{
1350	switch (sset) {
1351	case ETH_SS_STATS:
1352		return SILAN_STATS_NUM;
1353	default:
1354		return -EOPNOTSUPP;
1355	}
1356}
1357
/* ethtool get_ethtool_stats: snapshot the driver-private counters under
 * the lock; slot order matches sc92031_ethtool_stats_strings. */
static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);
	data[0] = priv->tx_timeouts;
	data[1] = priv->rx_loss;
	spin_unlock_bh(&priv->lock);
}
1368
/* ethtool operations supported by this driver. */
static const struct ethtool_ops sc92031_ethtool_ops = {
	.get_settings		= sc92031_ethtool_get_settings,
	.set_settings		= sc92031_ethtool_set_settings,
	.get_wol		= sc92031_ethtool_get_wol,
	.set_wol		= sc92031_ethtool_set_wol,
	.nway_reset		= sc92031_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= sc92031_ethtool_get_strings,
	.get_sset_count		= sc92031_ethtool_get_sset_count,
	.get_ethtool_stats	= sc92031_ethtool_get_ethtool_stats,
};
1380
1381
/* Net device operations; generic eth_* helpers are used where the
 * hardware needs no special handling. */
static const struct net_device_ops sc92031_netdev_ops = {
	.ndo_get_stats		= sc92031_get_stats,
	.ndo_start_xmit		= sc92031_start_xmit,
	.ndo_open		= sc92031_open,
	.ndo_stop		= sc92031_stop,
	.ndo_set_rx_mode	= sc92031_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_tx_timeout		= sc92031_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sc92031_poll_controller,
#endif
};
1396
/* PCI probe: enable the device, map its registers (MMIO or PIO per
 * SC92031_USE_BAR), allocate and initialize the net_device, read the
 * permanent MAC address from the chip, and register with the network
 * stack.
 *
 * Returns 0 on success, a negative errno on failure; resources are
 * unwound in reverse order via the goto labels.
 */
static int __devinit sc92031_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int err;
	void __iomem* port_base;
	struct net_device *dev;
	struct sc92031_priv *priv;
	u32 mac0, mac1;
	unsigned long base_addr;

	err = pci_enable_device(pdev);
	if (unlikely(err < 0))
		goto out_enable_device;

	pci_set_master(pdev);

	/* 32-bit DMA only, for both streaming and coherent mappings. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_request_regions(pdev, SC92031_NAME);
	if (unlikely(err < 0))
		goto out_request_regions;

	/* Map the whole BAR (len 0 == full length). */
	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
	if (unlikely(!port_base)) {
		err = -EIO;
		goto out_iomap;
	}

	dev = alloc_etherdev(sizeof(struct sc92031_priv));
	if (unlikely(!dev)) {
		err = -ENOMEM;
		goto out_alloc_etherdev;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if SC92031_USE_BAR == 0
	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
#elif SC92031_USE_BAR == 1
	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
#endif
	dev->irq = pdev->irq;

	/* faked with skb_copy_and_csum_dev */
	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	dev->netdev_ops		= &sc92031_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;
	dev->ethtool_ops	= &sc92031_ethtool_ops;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	priv->port_base = port_base;
	priv->pdev = pdev;
	tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
	/* Fudge tasklet count so the call to sc92031_enable_interrupts at
	 * sc92031_open will work correctly */
	tasklet_disable_nosync(&priv->tasklet);

	/* PCI PM Wakeup */
	iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);

	/* MAC address is read big-endian from two 32-bit registers; only
	 * the low 16 bits of the second register are used. */
	mac0 = ioread32(port_base + MAC0);
	mac1 = ioread32(port_base + MAC0 + 4);
	dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
	dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
	dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
	dev->dev_addr[3] = dev->perm_addr[3] = mac0;
	dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
	dev->dev_addr[5] = dev->perm_addr[5] = mac1;

	err = register_netdev(dev);
	if (err < 0)
		goto out_register_netdev;

#if SC92031_USE_BAR == 0
	base_addr = dev->mem_start;
#elif SC92031_USE_BAR == 1
	base_addr = dev->base_addr;
#endif
	printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
			base_addr, dev->dev_addr, dev->irq);

	return 0;

out_register_netdev:
	free_netdev(dev);
out_alloc_etherdev:
	pci_iounmap(pdev, port_base);
out_iomap:
	pci_release_regions(pdev);
out_request_regions:
out_set_dma_mask:
	pci_disable_device(pdev);
out_enable_device:
	return err;
}
1503
/* PCI remove: tear down everything sc92031_probe set up, in reverse
 * order of acquisition. */
static void __devexit sc92031_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem* port_base = priv->port_base;

	unregister_netdev(dev);
	free_netdev(dev);
	pci_iounmap(pdev, port_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1516
/* PCI suspend: quiesce the device (if the interface is up) and put it
 * into the requested low-power state.
 *
 * Returns 0.
 */
static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_save_state(pdev);

	/* Nothing to quiesce if the interface was never opened. */
	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

	/* Stop DMA and drop any in-flight Tx buffers. */
	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);

out:
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1545
/* PCI resume: restore PCI state, power up, and (if the interface was
 * running) reinitialize the chip and reattach to the stack.
 *
 * Returns 0.
 */
static int sc92031_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	if (!netif_running(dev))
		goto out;

	/* Interrupts already disabled by sc92031_suspend */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	netif_device_attach(dev);

	/* Only allow transmit once the link is known to be up. */
	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
	else
		netif_tx_disable(dev);

out:
	return 0;
}
1576
/* PCI IDs handled by this driver. NOTE(review): 0x1088 is an additional
 * vendor id for a rebadged part — confirm against hardware docs. */
static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
	{ PCI_DEVICE(0x1088, 0x2031) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
1584
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver sc92031_pci_driver = {
	.name		= SC92031_NAME,
	.id_table	= sc92031_pci_device_id_table,
	.probe		= sc92031_probe,
	.remove		= __devexit_p(sc92031_remove),
	.suspend	= sc92031_suspend,
	.resume		= sc92031_resume,
};
1593
/* Module entry point: register the PCI driver. */
static int __init sc92031_init(void)
{
	return pci_register_driver(&sc92031_pci_driver);
}
1598
/* Module exit point: unregister the PCI driver. */
static void __exit sc92031_exit(void)
{
	pci_unregister_driver(&sc92031_pci_driver);
}
1603
module_init(sc92031_init);
module_exit(sc92031_exit);

/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
1610