1/* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan  (mchan@broadcom.com)
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
30#include <linux/bitops.h>
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
35#include <asm/page.h>
36#include <linux/time.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
39#include <linux/if.h>
40#include <linux/if_vlan.h>
41#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
44#include <linux/workqueue.h>
45#include <linux/crc32.h>
46#include <linux/prefetch.h>
47#include <linux/cache.h>
48#include <linux/firmware.h>
49#include <linux/log2.h>
50#include <linux/aer.h>
51
52#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53#define BCM_CNIC 1
54#include "cnic_if.h"
55#endif
56#include "bnx2.h"
57#include "bnx2_fw.h"
58
59#define DRV_MODULE_NAME		"bnx2"
60#define DRV_MODULE_VERSION	"2.2.1"
61#define DRV_MODULE_RELDATE	"Dec 18, 2011"
62#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
63#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
64#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
65#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
66#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
67
68#define RUN_AT(x) (jiffies + (x))
69
70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT  (5*HZ)
72
73static char version[] __devinitdata =
74	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
80MODULE_FIRMWARE(FW_MIPS_FILE_06);
81MODULE_FIRMWARE(FW_RV2P_FILE_06);
82MODULE_FIRMWARE(FW_MIPS_FILE_09);
83MODULE_FIRMWARE(FW_RV2P_FILE_09);
84MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
85
86static int disable_msi = 0;
87
88module_param(disable_msi, int, 0);
89MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90
91typedef enum {
92	BCM5706 = 0,
93	NC370T,
94	NC370I,
95	BCM5706S,
96	NC370F,
97	BCM5708,
98	BCM5708S,
99	BCM5709,
100	BCM5709S,
101	BCM5716,
102	BCM5716S,
103} board_t;
104
105/* indexed by board_t, above */
106static struct {
107	char *name;
108} board_info[] __devinitdata = {
109	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
110	{ "HP NC370T Multifunction Gigabit Server Adapter" },
111	{ "HP NC370i Multifunction Gigabit Server Adapter" },
112	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
113	{ "HP NC370F Multifunction Gigabit Server Adapter" },
114	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
115	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
116	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
117	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
118	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
119	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
120	};
121
122static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
123	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
125	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
127	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
129	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
130	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
131	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
133	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
135	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
136	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
137	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
138	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
139	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
140	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
141	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
142	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
143	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
144	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
145	{ 0, }
146};
147
148static const struct flash_spec flash_table[] =
149{
150#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
151#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
152	/* Slow EEPROM */
153	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
154	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
155	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
156	 "EEPROM - slow"},
157	/* Expansion entry 0001 */
158	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
159	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
160	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
161	 "Entry 0001"},
162	/* Saifun SA25F010 (non-buffered flash) */
163	/* strap, cfg1, & write1 need updates */
164	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
165	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
167	 "Non-buffered flash (128kB)"},
168	/* Saifun SA25F020 (non-buffered flash) */
169	/* strap, cfg1, & write1 need updates */
170	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
171	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
173	 "Non-buffered flash (256kB)"},
174	/* Expansion entry 0100 */
175	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
176	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
177	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178	 "Entry 0100"},
179	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
180	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
181	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
182	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
183	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
184	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
185	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
186	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
187	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
188	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
189	/* Saifun SA25F005 (non-buffered flash) */
190	/* strap, cfg1, & write1 need updates */
191	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
192	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
194	 "Non-buffered flash (64kB)"},
195	/* Fast EEPROM */
196	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
197	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
198	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
199	 "EEPROM - fast"},
200	/* Expansion entry 1001 */
201	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
202	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204	 "Entry 1001"},
205	/* Expansion entry 1010 */
206	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
207	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209	 "Entry 1010"},
210	/* ATMEL AT45DB011B (buffered flash) */
211	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
212	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
213	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
214	 "Buffered flash (128kB)"},
215	/* Expansion entry 1100 */
216	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
217	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219	 "Entry 1100"},
220	/* Expansion entry 1101 */
221	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
222	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
223	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
224	 "Entry 1101"},
225	/* Ateml Expansion entry 1110 */
226	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
227	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
229	 "Entry 1110 (Atmel)"},
230	/* ATMEL AT45DB021B (buffered flash) */
231	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
232	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
233	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
234	 "Buffered flash (256kB)"},
235};
236
237static const struct flash_spec flash_5709 = {
238	.flags		= BNX2_NV_BUFFERED,
239	.page_bits	= BCM5709_FLASH_PAGE_BITS,
240	.page_size	= BCM5709_FLASH_PAGE_SIZE,
241	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
242	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
243	.name		= "5709 Buffered flash (256kB)",
244};
245
246MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247
248static void bnx2_init_napi(struct bnx2 *bp);
249static void bnx2_del_napi(struct bnx2 *bp);
250
251static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252{
253	u32 diff;
254
255	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
256	barrier();
257
258	/* The ring uses 256 indices for 255 entries, one of them
259	 * needs to be skipped.
260	 */
261	diff = txr->tx_prod - txr->tx_cons;
262	if (unlikely(diff >= TX_DESC_CNT)) {
263		diff &= 0xffff;
264		if (diff == TX_DESC_CNT)
265			diff = MAX_TX_DESC_CNT;
266	}
267	return bp->tx_ring_size - diff;
268}
269
270static u32
271bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272{
273	u32 val;
274
275	spin_lock_bh(&bp->indirect_lock);
276	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
278	spin_unlock_bh(&bp->indirect_lock);
279	return val;
280}
281
282static void
283bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284{
285	spin_lock_bh(&bp->indirect_lock);
286	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
287	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
288	spin_unlock_bh(&bp->indirect_lock);
289}
290
291static void
292bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
293{
294	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
295}
296
297static u32
298bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299{
300	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
301}
302
303static void
304bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
305{
306	offset += cid_addr;
307	spin_lock_bh(&bp->indirect_lock);
308	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
309		int i;
310
311		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
312		REG_WR(bp, BNX2_CTX_CTX_CTRL,
313		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314		for (i = 0; i < 5; i++) {
315			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
316			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
317				break;
318			udelay(5);
319		}
320	} else {
321		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
322		REG_WR(bp, BNX2_CTX_DATA, val);
323	}
324	spin_unlock_bh(&bp->indirect_lock);
325}
326
327#ifdef BCM_CNIC
328static int
329bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330{
331	struct bnx2 *bp = netdev_priv(dev);
332	struct drv_ctl_io *io = &info->data.io;
333
334	switch (info->cmd) {
335	case DRV_CTL_IO_WR_CMD:
336		bnx2_reg_wr_ind(bp, io->offset, io->data);
337		break;
338	case DRV_CTL_IO_RD_CMD:
339		io->data = bnx2_reg_rd_ind(bp, io->offset);
340		break;
341	case DRV_CTL_CTX_WR_CMD:
342		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343		break;
344	default:
345		return -EINVAL;
346	}
347	return 0;
348}
349
350static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
351{
352	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
354	int sb_id;
355
356	if (bp->flags & BNX2_FLAG_USING_MSIX) {
357		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358		bnapi->cnic_present = 0;
359		sb_id = bp->irq_nvecs;
360		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
361	} else {
362		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
363		bnapi->cnic_tag = bnapi->last_status_idx;
364		bnapi->cnic_present = 1;
365		sb_id = 0;
366		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
367	}
368
369	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
370	cp->irq_arr[0].status_blk = (void *)
371		((unsigned long) bnapi->status_blk.msi +
372		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373	cp->irq_arr[0].status_blk_num = sb_id;
374	cp->num_irq = 1;
375}
376
377static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
378			      void *data)
379{
380	struct bnx2 *bp = netdev_priv(dev);
381	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382
383	if (ops == NULL)
384		return -EINVAL;
385
386	if (cp->drv_state & CNIC_DRV_STATE_REGD)
387		return -EBUSY;
388
389	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
390		return -ENODEV;
391
392	bp->cnic_data = data;
393	rcu_assign_pointer(bp->cnic_ops, ops);
394
395	cp->num_irq = 0;
396	cp->drv_state = CNIC_DRV_STATE_REGD;
397
398	bnx2_setup_cnic_irq_info(bp);
399
400	return 0;
401}
402
403static int bnx2_unregister_cnic(struct net_device *dev)
404{
405	struct bnx2 *bp = netdev_priv(dev);
406	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
407	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
408
409	mutex_lock(&bp->cnic_lock);
410	cp->drv_state = 0;
411	bnapi->cnic_present = 0;
412	RCU_INIT_POINTER(bp->cnic_ops, NULL);
413	mutex_unlock(&bp->cnic_lock);
414	synchronize_rcu();
415	return 0;
416}
417
418struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
419{
420	struct bnx2 *bp = netdev_priv(dev);
421	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
422
423	if (!cp->max_iscsi_conn)
424		return NULL;
425
426	cp->drv_owner = THIS_MODULE;
427	cp->chip_id = bp->chip_id;
428	cp->pdev = bp->pdev;
429	cp->io_base = bp->regview;
430	cp->drv_ctl = bnx2_drv_ctl;
431	cp->drv_register_cnic = bnx2_register_cnic;
432	cp->drv_unregister_cnic = bnx2_unregister_cnic;
433
434	return cp;
435}
436EXPORT_SYMBOL(bnx2_cnic_probe);
437
438static void
439bnx2_cnic_stop(struct bnx2 *bp)
440{
441	struct cnic_ops *c_ops;
442	struct cnic_ctl_info info;
443
444	mutex_lock(&bp->cnic_lock);
445	c_ops = rcu_dereference_protected(bp->cnic_ops,
446					  lockdep_is_held(&bp->cnic_lock));
447	if (c_ops) {
448		info.cmd = CNIC_CTL_STOP_CMD;
449		c_ops->cnic_ctl(bp->cnic_data, &info);
450	}
451	mutex_unlock(&bp->cnic_lock);
452}
453
454static void
455bnx2_cnic_start(struct bnx2 *bp)
456{
457	struct cnic_ops *c_ops;
458	struct cnic_ctl_info info;
459
460	mutex_lock(&bp->cnic_lock);
461	c_ops = rcu_dereference_protected(bp->cnic_ops,
462					  lockdep_is_held(&bp->cnic_lock));
463	if (c_ops) {
464		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
465			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
466
467			bnapi->cnic_tag = bnapi->last_status_idx;
468		}
469		info.cmd = CNIC_CTL_START_CMD;
470		c_ops->cnic_ctl(bp->cnic_data, &info);
471	}
472	mutex_unlock(&bp->cnic_lock);
473}
474
475#else
476
477static void
478bnx2_cnic_stop(struct bnx2 *bp)
479{
480}
481
482static void
483bnx2_cnic_start(struct bnx2 *bp)
484{
485}
486
487#endif
488
489static int
490bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
491{
492	u32 val1;
493	int i, ret;
494
495	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
496		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
497		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
498
499		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
500		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
501
502		udelay(40);
503	}
504
505	val1 = (bp->phy_addr << 21) | (reg << 16) |
506		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
507		BNX2_EMAC_MDIO_COMM_START_BUSY;
508	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
509
510	for (i = 0; i < 50; i++) {
511		udelay(10);
512
513		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
514		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
515			udelay(5);
516
517			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
518			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
519
520			break;
521		}
522	}
523
524	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
525		*val = 0x0;
526		ret = -EBUSY;
527	}
528	else {
529		*val = val1;
530		ret = 0;
531	}
532
533	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
534		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
535		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
536
537		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
538		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
539
540		udelay(40);
541	}
542
543	return ret;
544}
545
546static int
547bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
548{
549	u32 val1;
550	int i, ret;
551
552	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
553		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
554		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
555
556		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
557		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
558
559		udelay(40);
560	}
561
562	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
563		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
564		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
565	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
566
567	for (i = 0; i < 50; i++) {
568		udelay(10);
569
570		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
571		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
572			udelay(5);
573			break;
574		}
575	}
576
577	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
578        	ret = -EBUSY;
579	else
580		ret = 0;
581
582	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
583		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
584		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
585
586		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
587		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
588
589		udelay(40);
590	}
591
592	return ret;
593}
594
595static void
596bnx2_disable_int(struct bnx2 *bp)
597{
598	int i;
599	struct bnx2_napi *bnapi;
600
601	for (i = 0; i < bp->irq_nvecs; i++) {
602		bnapi = &bp->bnx2_napi[i];
603		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605	}
606	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607}
608
609static void
610bnx2_enable_int(struct bnx2 *bp)
611{
612	int i;
613	struct bnx2_napi *bnapi;
614
615	for (i = 0; i < bp->irq_nvecs; i++) {
616		bnapi = &bp->bnx2_napi[i];
617
618		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
620		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
621		       bnapi->last_status_idx);
622
623		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625		       bnapi->last_status_idx);
626	}
627	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
628}
629
630static void
631bnx2_disable_int_sync(struct bnx2 *bp)
632{
633	int i;
634
635	atomic_inc(&bp->intr_sem);
636	if (!netif_running(bp->dev))
637		return;
638
639	bnx2_disable_int(bp);
640	for (i = 0; i < bp->irq_nvecs; i++)
641		synchronize_irq(bp->irq_tbl[i].vector);
642}
643
644static void
645bnx2_napi_disable(struct bnx2 *bp)
646{
647	int i;
648
649	for (i = 0; i < bp->irq_nvecs; i++)
650		napi_disable(&bp->bnx2_napi[i].napi);
651}
652
653static void
654bnx2_napi_enable(struct bnx2 *bp)
655{
656	int i;
657
658	for (i = 0; i < bp->irq_nvecs; i++)
659		napi_enable(&bp->bnx2_napi[i].napi);
660}
661
662static void
663bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
664{
665	if (stop_cnic)
666		bnx2_cnic_stop(bp);
667	if (netif_running(bp->dev)) {
668		bnx2_napi_disable(bp);
669		netif_tx_disable(bp->dev);
670	}
671	bnx2_disable_int_sync(bp);
672	netif_carrier_off(bp->dev);	/* prevent tx timeout */
673}
674
675static void
676bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
677{
678	if (atomic_dec_and_test(&bp->intr_sem)) {
679		if (netif_running(bp->dev)) {
680			netif_tx_wake_all_queues(bp->dev);
681			spin_lock_bh(&bp->phy_lock);
682			if (bp->link_up)
683				netif_carrier_on(bp->dev);
684			spin_unlock_bh(&bp->phy_lock);
685			bnx2_napi_enable(bp);
686			bnx2_enable_int(bp);
687			if (start_cnic)
688				bnx2_cnic_start(bp);
689		}
690	}
691}
692
693static void
694bnx2_free_tx_mem(struct bnx2 *bp)
695{
696	int i;
697
698	for (i = 0; i < bp->num_tx_rings; i++) {
699		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701
702		if (txr->tx_desc_ring) {
703			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704					  txr->tx_desc_ring,
705					  txr->tx_desc_mapping);
706			txr->tx_desc_ring = NULL;
707		}
708		kfree(txr->tx_buf_ring);
709		txr->tx_buf_ring = NULL;
710	}
711}
712
713static void
714bnx2_free_rx_mem(struct bnx2 *bp)
715{
716	int i;
717
718	for (i = 0; i < bp->num_rx_rings; i++) {
719		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721		int j;
722
723		for (j = 0; j < bp->rx_max_ring; j++) {
724			if (rxr->rx_desc_ring[j])
725				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726						  rxr->rx_desc_ring[j],
727						  rxr->rx_desc_mapping[j]);
728			rxr->rx_desc_ring[j] = NULL;
729		}
730		vfree(rxr->rx_buf_ring);
731		rxr->rx_buf_ring = NULL;
732
733		for (j = 0; j < bp->rx_max_pg_ring; j++) {
734			if (rxr->rx_pg_desc_ring[j])
735				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736						  rxr->rx_pg_desc_ring[j],
737						  rxr->rx_pg_desc_mapping[j]);
738			rxr->rx_pg_desc_ring[j] = NULL;
739		}
740		vfree(rxr->rx_pg_ring);
741		rxr->rx_pg_ring = NULL;
742	}
743}
744
745static int
746bnx2_alloc_tx_mem(struct bnx2 *bp)
747{
748	int i;
749
750	for (i = 0; i < bp->num_tx_rings; i++) {
751		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753
754		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755		if (txr->tx_buf_ring == NULL)
756			return -ENOMEM;
757
758		txr->tx_desc_ring =
759			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760					   &txr->tx_desc_mapping, GFP_KERNEL);
761		if (txr->tx_desc_ring == NULL)
762			return -ENOMEM;
763	}
764	return 0;
765}
766
767static int
768bnx2_alloc_rx_mem(struct bnx2 *bp)
769{
770	int i;
771
772	for (i = 0; i < bp->num_rx_rings; i++) {
773		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
775		int j;
776
777		rxr->rx_buf_ring =
778			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
779		if (rxr->rx_buf_ring == NULL)
780			return -ENOMEM;
781
782		for (j = 0; j < bp->rx_max_ring; j++) {
783			rxr->rx_desc_ring[j] =
784				dma_alloc_coherent(&bp->pdev->dev,
785						   RXBD_RING_SIZE,
786						   &rxr->rx_desc_mapping[j],
787						   GFP_KERNEL);
788			if (rxr->rx_desc_ring[j] == NULL)
789				return -ENOMEM;
790
791		}
792
793		if (bp->rx_pg_ring_size) {
794			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
795						  bp->rx_max_pg_ring);
796			if (rxr->rx_pg_ring == NULL)
797				return -ENOMEM;
798
799		}
800
801		for (j = 0; j < bp->rx_max_pg_ring; j++) {
802			rxr->rx_pg_desc_ring[j] =
803				dma_alloc_coherent(&bp->pdev->dev,
804						   RXBD_RING_SIZE,
805						   &rxr->rx_pg_desc_mapping[j],
806						   GFP_KERNEL);
807			if (rxr->rx_pg_desc_ring[j] == NULL)
808				return -ENOMEM;
809
810		}
811	}
812	return 0;
813}
814
815static void
816bnx2_free_mem(struct bnx2 *bp)
817{
818	int i;
819	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
820
821	bnx2_free_tx_mem(bp);
822	bnx2_free_rx_mem(bp);
823
824	for (i = 0; i < bp->ctx_pages; i++) {
825		if (bp->ctx_blk[i]) {
826			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
827					  bp->ctx_blk[i],
828					  bp->ctx_blk_mapping[i]);
829			bp->ctx_blk[i] = NULL;
830		}
831	}
832	if (bnapi->status_blk.msi) {
833		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
834				  bnapi->status_blk.msi,
835				  bp->status_blk_mapping);
836		bnapi->status_blk.msi = NULL;
837		bp->stats_blk = NULL;
838	}
839}
840
841static int
842bnx2_alloc_mem(struct bnx2 *bp)
843{
844	int i, status_blk_size, err;
845	struct bnx2_napi *bnapi;
846	void *status_blk;
847
848	/* Combine status and statistics blocks into one allocation. */
849	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
850	if (bp->flags & BNX2_FLAG_MSIX_CAP)
851		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
852						 BNX2_SBLK_MSIX_ALIGN_SIZE);
853	bp->status_stats_size = status_blk_size +
854				sizeof(struct statistics_block);
855
856	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857					&bp->status_blk_mapping, GFP_KERNEL);
858	if (status_blk == NULL)
859		goto alloc_mem_err;
860
861	memset(status_blk, 0, bp->status_stats_size);
862
863	bnapi = &bp->bnx2_napi[0];
864	bnapi->status_blk.msi = status_blk;
865	bnapi->hw_tx_cons_ptr =
866		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
867	bnapi->hw_rx_cons_ptr =
868		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
869	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
870		for (i = 1; i < bp->irq_nvecs; i++) {
871			struct status_block_msix *sblk;
872
873			bnapi = &bp->bnx2_napi[i];
874
875			sblk = (void *) (status_blk +
876					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877			bnapi->status_blk.msix = sblk;
878			bnapi->hw_tx_cons_ptr =
879				&sblk->status_tx_quick_consumer_index;
880			bnapi->hw_rx_cons_ptr =
881				&sblk->status_rx_quick_consumer_index;
882			bnapi->int_num = i << 24;
883		}
884	}
885
886	bp->stats_blk = status_blk + status_blk_size;
887
888	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
889
890	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
891		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
892		if (bp->ctx_pages == 0)
893			bp->ctx_pages = 1;
894		for (i = 0; i < bp->ctx_pages; i++) {
895			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
896						BCM_PAGE_SIZE,
897						&bp->ctx_blk_mapping[i],
898						GFP_KERNEL);
899			if (bp->ctx_blk[i] == NULL)
900				goto alloc_mem_err;
901		}
902	}
903
904	err = bnx2_alloc_rx_mem(bp);
905	if (err)
906		goto alloc_mem_err;
907
908	err = bnx2_alloc_tx_mem(bp);
909	if (err)
910		goto alloc_mem_err;
911
912	return 0;
913
914alloc_mem_err:
915	bnx2_free_mem(bp);
916	return -ENOMEM;
917}
918
919static void
920bnx2_report_fw_link(struct bnx2 *bp)
921{
922	u32 fw_link_status = 0;
923
924	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
925		return;
926
927	if (bp->link_up) {
928		u32 bmsr;
929
930		switch (bp->line_speed) {
931		case SPEED_10:
932			if (bp->duplex == DUPLEX_HALF)
933				fw_link_status = BNX2_LINK_STATUS_10HALF;
934			else
935				fw_link_status = BNX2_LINK_STATUS_10FULL;
936			break;
937		case SPEED_100:
938			if (bp->duplex == DUPLEX_HALF)
939				fw_link_status = BNX2_LINK_STATUS_100HALF;
940			else
941				fw_link_status = BNX2_LINK_STATUS_100FULL;
942			break;
943		case SPEED_1000:
944			if (bp->duplex == DUPLEX_HALF)
945				fw_link_status = BNX2_LINK_STATUS_1000HALF;
946			else
947				fw_link_status = BNX2_LINK_STATUS_1000FULL;
948			break;
949		case SPEED_2500:
950			if (bp->duplex == DUPLEX_HALF)
951				fw_link_status = BNX2_LINK_STATUS_2500HALF;
952			else
953				fw_link_status = BNX2_LINK_STATUS_2500FULL;
954			break;
955		}
956
957		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
958
959		if (bp->autoneg) {
960			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
961
962			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
963			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
964
965			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
966			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
967				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
968			else
969				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
970		}
971	}
972	else
973		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
974
975	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
976}
977
978static char *
979bnx2_xceiver_str(struct bnx2 *bp)
980{
981	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
982		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
983		 "Copper");
984}
985
986static void
987bnx2_report_link(struct bnx2 *bp)
988{
989	if (bp->link_up) {
990		netif_carrier_on(bp->dev);
991		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
992			    bnx2_xceiver_str(bp),
993			    bp->line_speed,
994			    bp->duplex == DUPLEX_FULL ? "full" : "half");
995
996		if (bp->flow_ctrl) {
997			if (bp->flow_ctrl & FLOW_CTRL_RX) {
998				pr_cont(", receive ");
999				if (bp->flow_ctrl & FLOW_CTRL_TX)
1000					pr_cont("& transmit ");
1001			}
1002			else {
1003				pr_cont(", transmit ");
1004			}
1005			pr_cont("flow control ON");
1006		}
1007		pr_cont("\n");
1008	} else {
1009		netif_carrier_off(bp->dev);
1010		netdev_err(bp->dev, "NIC %s Link is Down\n",
1011			   bnx2_xceiver_str(bp));
1012	}
1013
1014	bnx2_report_fw_link(bp);
1015}
1016
1017static void
1018bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1019{
1020	u32 local_adv, remote_adv;
1021
1022	bp->flow_ctrl = 0;
1023	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1024		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1025
1026		if (bp->duplex == DUPLEX_FULL) {
1027			bp->flow_ctrl = bp->req_flow_ctrl;
1028		}
1029		return;
1030	}
1031
1032	if (bp->duplex != DUPLEX_FULL) {
1033		return;
1034	}
1035
1036	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1037	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1038		u32 val;
1039
1040		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1041		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1042			bp->flow_ctrl |= FLOW_CTRL_TX;
1043		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1044			bp->flow_ctrl |= FLOW_CTRL_RX;
1045		return;
1046	}
1047
1048	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1049	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1050
1051	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1052		u32 new_local_adv = 0;
1053		u32 new_remote_adv = 0;
1054
1055		if (local_adv & ADVERTISE_1000XPAUSE)
1056			new_local_adv |= ADVERTISE_PAUSE_CAP;
1057		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1058			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1059		if (remote_adv & ADVERTISE_1000XPAUSE)
1060			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1061		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1062			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1063
1064		local_adv = new_local_adv;
1065		remote_adv = new_remote_adv;
1066	}
1067
1068	/* See Table 28B-3 of 802.3ab-1999 spec. */
1069	if (local_adv & ADVERTISE_PAUSE_CAP) {
1070		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1071	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073			}
1074			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1075				bp->flow_ctrl = FLOW_CTRL_RX;
1076			}
1077		}
1078		else {
1079			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1080				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1081			}
1082		}
1083	}
1084	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1085		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1086			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1087
1088			bp->flow_ctrl = FLOW_CTRL_TX;
1089		}
1090	}
1091}
1092
1093static int
1094bnx2_5709s_linkup(struct bnx2 *bp)
1095{
1096	u32 val, speed;
1097
1098	bp->link_up = 1;
1099
1100	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1101	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1102	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103
1104	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1105		bp->line_speed = bp->req_line_speed;
1106		bp->duplex = bp->req_duplex;
1107		return 0;
1108	}
1109	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110	switch (speed) {
1111		case MII_BNX2_GP_TOP_AN_SPEED_10:
1112			bp->line_speed = SPEED_10;
1113			break;
1114		case MII_BNX2_GP_TOP_AN_SPEED_100:
1115			bp->line_speed = SPEED_100;
1116			break;
1117		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1118		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1119			bp->line_speed = SPEED_1000;
1120			break;
1121		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1122			bp->line_speed = SPEED_2500;
1123			break;
1124	}
1125	if (val & MII_BNX2_GP_TOP_AN_FD)
1126		bp->duplex = DUPLEX_FULL;
1127	else
1128		bp->duplex = DUPLEX_HALF;
1129	return 0;
1130}
1131
1132static int
1133bnx2_5708s_linkup(struct bnx2 *bp)
1134{
1135	u32 val;
1136
1137	bp->link_up = 1;
1138	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1139	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1140		case BCM5708S_1000X_STAT1_SPEED_10:
1141			bp->line_speed = SPEED_10;
1142			break;
1143		case BCM5708S_1000X_STAT1_SPEED_100:
1144			bp->line_speed = SPEED_100;
1145			break;
1146		case BCM5708S_1000X_STAT1_SPEED_1G:
1147			bp->line_speed = SPEED_1000;
1148			break;
1149		case BCM5708S_1000X_STAT1_SPEED_2G5:
1150			bp->line_speed = SPEED_2500;
1151			break;
1152	}
1153	if (val & BCM5708S_1000X_STAT1_FD)
1154		bp->duplex = DUPLEX_FULL;
1155	else
1156		bp->duplex = DUPLEX_HALF;
1157
1158	return 0;
1159}
1160
1161static int
1162bnx2_5706s_linkup(struct bnx2 *bp)
1163{
1164	u32 bmcr, local_adv, remote_adv, common;
1165
1166	bp->link_up = 1;
1167	bp->line_speed = SPEED_1000;
1168
1169	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1170	if (bmcr & BMCR_FULLDPLX) {
1171		bp->duplex = DUPLEX_FULL;
1172	}
1173	else {
1174		bp->duplex = DUPLEX_HALF;
1175	}
1176
1177	if (!(bmcr & BMCR_ANENABLE)) {
1178		return 0;
1179	}
1180
1181	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1182	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1183
1184	common = local_adv & remote_adv;
1185	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186
1187		if (common & ADVERTISE_1000XFULL) {
1188			bp->duplex = DUPLEX_FULL;
1189		}
1190		else {
1191			bp->duplex = DUPLEX_HALF;
1192		}
1193	}
1194
1195	return 0;
1196}
1197
1198static int
1199bnx2_copper_linkup(struct bnx2 *bp)
1200{
1201	u32 bmcr;
1202
1203	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1204	if (bmcr & BMCR_ANENABLE) {
1205		u32 local_adv, remote_adv, common;
1206
1207		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1208		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1209
1210		common = local_adv & (remote_adv >> 2);
1211		if (common & ADVERTISE_1000FULL) {
1212			bp->line_speed = SPEED_1000;
1213			bp->duplex = DUPLEX_FULL;
1214		}
1215		else if (common & ADVERTISE_1000HALF) {
1216			bp->line_speed = SPEED_1000;
1217			bp->duplex = DUPLEX_HALF;
1218		}
1219		else {
1220			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1221			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1222
1223			common = local_adv & remote_adv;
1224			if (common & ADVERTISE_100FULL) {
1225				bp->line_speed = SPEED_100;
1226				bp->duplex = DUPLEX_FULL;
1227			}
1228			else if (common & ADVERTISE_100HALF) {
1229				bp->line_speed = SPEED_100;
1230				bp->duplex = DUPLEX_HALF;
1231			}
1232			else if (common & ADVERTISE_10FULL) {
1233				bp->line_speed = SPEED_10;
1234				bp->duplex = DUPLEX_FULL;
1235			}
1236			else if (common & ADVERTISE_10HALF) {
1237				bp->line_speed = SPEED_10;
1238				bp->duplex = DUPLEX_HALF;
1239			}
1240			else {
1241				bp->line_speed = 0;
1242				bp->link_up = 0;
1243			}
1244		}
1245	}
1246	else {
1247		if (bmcr & BMCR_SPEED100) {
1248			bp->line_speed = SPEED_100;
1249		}
1250		else {
1251			bp->line_speed = SPEED_10;
1252		}
1253		if (bmcr & BMCR_FULLDPLX) {
1254			bp->duplex = DUPLEX_FULL;
1255		}
1256		else {
1257			bp->duplex = DUPLEX_HALF;
1258		}
1259	}
1260
1261	return 0;
1262}
1263
1264static void
1265bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1266{
1267	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1268
1269	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1270	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1271	val |= 0x02 << 8;
1272
1273	if (bp->flow_ctrl & FLOW_CTRL_TX)
1274		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1275
1276	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1277}
1278
1279static void
1280bnx2_init_all_rx_contexts(struct bnx2 *bp)
1281{
1282	int i;
1283	u32 cid;
1284
1285	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1286		if (i == 1)
1287			cid = RX_RSS_CID;
1288		bnx2_init_rx_context(bp, cid);
1289	}
1290}
1291
1292static void
1293bnx2_set_mac_link(struct bnx2 *bp)
1294{
1295	u32 val;
1296
1297	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1298	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1299		(bp->duplex == DUPLEX_HALF)) {
1300		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1301	}
1302
1303	/* Configure the EMAC mode register. */
1304	val = REG_RD(bp, BNX2_EMAC_MODE);
1305
1306	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1307		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1308		BNX2_EMAC_MODE_25G_MODE);
1309
1310	if (bp->link_up) {
1311		switch (bp->line_speed) {
1312			case SPEED_10:
1313				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1314					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1315					break;
1316				}
1317				/* fall through */
1318			case SPEED_100:
1319				val |= BNX2_EMAC_MODE_PORT_MII;
1320				break;
1321			case SPEED_2500:
1322				val |= BNX2_EMAC_MODE_25G_MODE;
1323				/* fall through */
1324			case SPEED_1000:
1325				val |= BNX2_EMAC_MODE_PORT_GMII;
1326				break;
1327		}
1328	}
1329	else {
1330		val |= BNX2_EMAC_MODE_PORT_GMII;
1331	}
1332
1333	/* Set the MAC to operate in the appropriate duplex mode. */
1334	if (bp->duplex == DUPLEX_HALF)
1335		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1336	REG_WR(bp, BNX2_EMAC_MODE, val);
1337
1338	/* Enable/disable rx PAUSE. */
1339	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1340
1341	if (bp->flow_ctrl & FLOW_CTRL_RX)
1342		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1343	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1344
1345	/* Enable/disable tx PAUSE. */
1346	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1347	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1348
1349	if (bp->flow_ctrl & FLOW_CTRL_TX)
1350		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1351	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1352
1353	/* Acknowledge the interrupt. */
1354	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1355
1356	bnx2_init_all_rx_contexts(bp);
1357}
1358
1359static void
1360bnx2_enable_bmsr1(struct bnx2 *bp)
1361{
1362	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1364		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365			       MII_BNX2_BLK_ADDR_GP_STATUS);
1366}
1367
1368static void
1369bnx2_disable_bmsr1(struct bnx2 *bp)
1370{
1371	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1372	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1373		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375}
1376
1377static int
1378bnx2_test_and_enable_2g5(struct bnx2 *bp)
1379{
1380	u32 up1;
1381	int ret = 1;
1382
1383	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1384		return 0;
1385
1386	if (bp->autoneg & AUTONEG_SPEED)
1387		bp->advertising |= ADVERTISED_2500baseX_Full;
1388
1389	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1390		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391
1392	bnx2_read_phy(bp, bp->mii_up1, &up1);
1393	if (!(up1 & BCM5708S_UP1_2G5)) {
1394		up1 |= BCM5708S_UP1_2G5;
1395		bnx2_write_phy(bp, bp->mii_up1, up1);
1396		ret = 0;
1397	}
1398
1399	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402
1403	return ret;
1404}
1405
1406static int
1407bnx2_test_and_disable_2g5(struct bnx2 *bp)
1408{
1409	u32 up1;
1410	int ret = 0;
1411
1412	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413		return 0;
1414
1415	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1416		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417
1418	bnx2_read_phy(bp, bp->mii_up1, &up1);
1419	if (up1 & BCM5708S_UP1_2G5) {
1420		up1 &= ~BCM5708S_UP1_2G5;
1421		bnx2_write_phy(bp, bp->mii_up1, up1);
1422		ret = 1;
1423	}
1424
1425	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428
1429	return ret;
1430}
1431
1432static void
1433bnx2_enable_forced_2g5(struct bnx2 *bp)
1434{
1435	u32 uninitialized_var(bmcr);
1436	int err;
1437
1438	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1439		return;
1440
1441	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1442		u32 val;
1443
1444		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1445			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1446		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1447			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1448			val |= MII_BNX2_SD_MISC1_FORCE |
1449				MII_BNX2_SD_MISC1_FORCE_2_5G;
1450			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1451		}
1452
1453		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456
1457	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1458		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459		if (!err)
1460			bmcr |= BCM5708S_BMCR_FORCE_2500;
1461	} else {
1462		return;
1463	}
1464
1465	if (err)
1466		return;
1467
1468	if (bp->autoneg & AUTONEG_SPEED) {
1469		bmcr &= ~BMCR_ANENABLE;
1470		if (bp->req_duplex == DUPLEX_FULL)
1471			bmcr |= BMCR_FULLDPLX;
1472	}
1473	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1474}
1475
1476static void
1477bnx2_disable_forced_2g5(struct bnx2 *bp)
1478{
1479	u32 uninitialized_var(bmcr);
1480	int err;
1481
1482	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483		return;
1484
1485	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486		u32 val;
1487
1488		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1490		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1491			val &= ~MII_BNX2_SD_MISC1_FORCE;
1492			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493		}
1494
1495		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1497		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498
1499	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1500		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501		if (!err)
1502			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503	} else {
1504		return;
1505	}
1506
1507	if (err)
1508		return;
1509
1510	if (bp->autoneg & AUTONEG_SPEED)
1511		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1513}
1514
1515static void
1516bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517{
1518	u32 val;
1519
1520	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522	if (start)
1523		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524	else
1525		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526}
1527
1528static int
1529bnx2_set_link(struct bnx2 *bp)
1530{
1531	u32 bmsr;
1532	u8 link_up;
1533
1534	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1535		bp->link_up = 1;
1536		return 0;
1537	}
1538
1539	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1540		return 0;
1541
1542	link_up = bp->link_up;
1543
1544	bnx2_enable_bmsr1(bp);
1545	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1546	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1547	bnx2_disable_bmsr1(bp);
1548
1549	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1550	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1551		u32 val, an_dbg;
1552
1553		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1554			bnx2_5706s_force_link_dn(bp, 0);
1555			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1556		}
1557		val = REG_RD(bp, BNX2_EMAC_STATUS);
1558
1559		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1560		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1561		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1562
1563		if ((val & BNX2_EMAC_STATUS_LINK) &&
1564		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1565			bmsr |= BMSR_LSTATUS;
1566		else
1567			bmsr &= ~BMSR_LSTATUS;
1568	}
1569
1570	if (bmsr & BMSR_LSTATUS) {
1571		bp->link_up = 1;
1572
1573		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1574			if (CHIP_NUM(bp) == CHIP_NUM_5706)
1575				bnx2_5706s_linkup(bp);
1576			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1577				bnx2_5708s_linkup(bp);
1578			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1579				bnx2_5709s_linkup(bp);
1580		}
1581		else {
1582			bnx2_copper_linkup(bp);
1583		}
1584		bnx2_resolve_flow_ctrl(bp);
1585	}
1586	else {
1587		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1588		    (bp->autoneg & AUTONEG_SPEED))
1589			bnx2_disable_forced_2g5(bp);
1590
1591		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1592			u32 bmcr;
1593
1594			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1595			bmcr |= BMCR_ANENABLE;
1596			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1597
1598			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1599		}
1600		bp->link_up = 0;
1601	}
1602
1603	if (bp->link_up != link_up) {
1604		bnx2_report_link(bp);
1605	}
1606
1607	bnx2_set_mac_link(bp);
1608
1609	return 0;
1610}
1611
1612static int
1613bnx2_reset_phy(struct bnx2 *bp)
1614{
1615	int i;
1616	u32 reg;
1617
1618        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619
1620#define PHY_RESET_MAX_WAIT 100
1621	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622		udelay(10);
1623
1624		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625		if (!(reg & BMCR_RESET)) {
1626			udelay(20);
1627			break;
1628		}
1629	}
1630	if (i == PHY_RESET_MAX_WAIT) {
1631		return -EBUSY;
1632	}
1633	return 0;
1634}
1635
1636static u32
1637bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638{
1639	u32 adv = 0;
1640
1641	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643
1644		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645			adv = ADVERTISE_1000XPAUSE;
1646		}
1647		else {
1648			adv = ADVERTISE_PAUSE_CAP;
1649		}
1650	}
1651	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653			adv = ADVERTISE_1000XPSE_ASYM;
1654		}
1655		else {
1656			adv = ADVERTISE_PAUSE_ASYM;
1657		}
1658	}
1659	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662		}
1663		else {
1664			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665		}
1666	}
1667	return adv;
1668}
1669
1670static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671
1672static int
1673bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1674__releases(&bp->phy_lock)
1675__acquires(&bp->phy_lock)
1676{
1677	u32 speed_arg = 0, pause_adv;
1678
1679	pause_adv = bnx2_phy_get_pause_adv(bp);
1680
1681	if (bp->autoneg & AUTONEG_SPEED) {
1682		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1683		if (bp->advertising & ADVERTISED_10baseT_Half)
1684			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1685		if (bp->advertising & ADVERTISED_10baseT_Full)
1686			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1687		if (bp->advertising & ADVERTISED_100baseT_Half)
1688			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1689		if (bp->advertising & ADVERTISED_100baseT_Full)
1690			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1691		if (bp->advertising & ADVERTISED_1000baseT_Full)
1692			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693		if (bp->advertising & ADVERTISED_2500baseX_Full)
1694			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1695	} else {
1696		if (bp->req_line_speed == SPEED_2500)
1697			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1698		else if (bp->req_line_speed == SPEED_1000)
1699			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700		else if (bp->req_line_speed == SPEED_100) {
1701			if (bp->req_duplex == DUPLEX_FULL)
1702				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703			else
1704				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1705		} else if (bp->req_line_speed == SPEED_10) {
1706			if (bp->req_duplex == DUPLEX_FULL)
1707				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708			else
1709				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1710		}
1711	}
1712
1713	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1714		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1715	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1716		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717
1718	if (port == PORT_TP)
1719		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1720			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721
1722	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1723
1724	spin_unlock_bh(&bp->phy_lock);
1725	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1726	spin_lock_bh(&bp->phy_lock);
1727
1728	return 0;
1729}
1730
1731static int
1732bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1733__releases(&bp->phy_lock)
1734__acquires(&bp->phy_lock)
1735{
1736	u32 adv, bmcr;
1737	u32 new_adv = 0;
1738
1739	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1740		return bnx2_setup_remote_phy(bp, port);
1741
1742	if (!(bp->autoneg & AUTONEG_SPEED)) {
1743		u32 new_bmcr;
1744		int force_link_down = 0;
1745
1746		if (bp->req_line_speed == SPEED_2500) {
1747			if (!bnx2_test_and_enable_2g5(bp))
1748				force_link_down = 1;
1749		} else if (bp->req_line_speed == SPEED_1000) {
1750			if (bnx2_test_and_disable_2g5(bp))
1751				force_link_down = 1;
1752		}
1753		bnx2_read_phy(bp, bp->mii_adv, &adv);
1754		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1755
1756		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1757		new_bmcr = bmcr & ~BMCR_ANENABLE;
1758		new_bmcr |= BMCR_SPEED1000;
1759
1760		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1761			if (bp->req_line_speed == SPEED_2500)
1762				bnx2_enable_forced_2g5(bp);
1763			else if (bp->req_line_speed == SPEED_1000) {
1764				bnx2_disable_forced_2g5(bp);
1765				new_bmcr &= ~0x2000;
1766			}
1767
1768		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1769			if (bp->req_line_speed == SPEED_2500)
1770				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771			else
1772				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1773		}
1774
1775		if (bp->req_duplex == DUPLEX_FULL) {
1776			adv |= ADVERTISE_1000XFULL;
1777			new_bmcr |= BMCR_FULLDPLX;
1778		}
1779		else {
1780			adv |= ADVERTISE_1000XHALF;
1781			new_bmcr &= ~BMCR_FULLDPLX;
1782		}
1783		if ((new_bmcr != bmcr) || (force_link_down)) {
1784			/* Force a link down visible on the other side */
1785			if (bp->link_up) {
1786				bnx2_write_phy(bp, bp->mii_adv, adv &
1787					       ~(ADVERTISE_1000XFULL |
1788						 ADVERTISE_1000XHALF));
1789				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1790					BMCR_ANRESTART | BMCR_ANENABLE);
1791
1792				bp->link_up = 0;
1793				netif_carrier_off(bp->dev);
1794				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1795				bnx2_report_link(bp);
1796			}
1797			bnx2_write_phy(bp, bp->mii_adv, adv);
1798			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1799		} else {
1800			bnx2_resolve_flow_ctrl(bp);
1801			bnx2_set_mac_link(bp);
1802		}
1803		return 0;
1804	}
1805
1806	bnx2_test_and_enable_2g5(bp);
1807
1808	if (bp->advertising & ADVERTISED_1000baseT_Full)
1809		new_adv |= ADVERTISE_1000XFULL;
1810
1811	new_adv |= bnx2_phy_get_pause_adv(bp);
1812
1813	bnx2_read_phy(bp, bp->mii_adv, &adv);
1814	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1815
1816	bp->serdes_an_pending = 0;
1817	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1818		/* Force a link down visible on the other side */
1819		if (bp->link_up) {
1820			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1821			spin_unlock_bh(&bp->phy_lock);
1822			msleep(20);
1823			spin_lock_bh(&bp->phy_lock);
1824		}
1825
1826		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1827		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1828			BMCR_ANENABLE);
1829		/* Speed up link-up time when the link partner
1830		 * does not autonegotiate which is very common
1831		 * in blade servers. Some blade servers use
1832		 * IPMI for kerboard input and it's important
1833		 * to minimize link disruptions. Autoneg. involves
1834		 * exchanging base pages plus 3 next pages and
1835		 * normally completes in about 120 msec.
1836		 */
1837		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1838		bp->serdes_an_pending = 1;
1839		mod_timer(&bp->timer, jiffies + bp->current_interval);
1840	} else {
1841		bnx2_resolve_flow_ctrl(bp);
1842		bnx2_set_mac_link(bp);
1843	}
1844
1845	return 0;
1846}
1847
1848#define ETHTOOL_ALL_FIBRE_SPEED						\
1849	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1850		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1851		(ADVERTISED_1000baseT_Full)
1852
1853#define ETHTOOL_ALL_COPPER_SPEED					\
1854	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1855	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1856	ADVERTISED_1000baseT_Full)
1857
1858#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1859	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1860
1861#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862
1863static void
1864bnx2_set_default_remote_link(struct bnx2 *bp)
1865{
1866	u32 link;
1867
1868	if (bp->phy_port == PORT_TP)
1869		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870	else
1871		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872
1873	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874		bp->req_line_speed = 0;
1875		bp->autoneg |= AUTONEG_SPEED;
1876		bp->advertising = ADVERTISED_Autoneg;
1877		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878			bp->advertising |= ADVERTISED_10baseT_Half;
1879		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880			bp->advertising |= ADVERTISED_10baseT_Full;
1881		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882			bp->advertising |= ADVERTISED_100baseT_Half;
1883		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884			bp->advertising |= ADVERTISED_100baseT_Full;
1885		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886			bp->advertising |= ADVERTISED_1000baseT_Full;
1887		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888			bp->advertising |= ADVERTISED_2500baseX_Full;
1889	} else {
1890		bp->autoneg = 0;
1891		bp->advertising = 0;
1892		bp->req_duplex = DUPLEX_FULL;
1893		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894			bp->req_line_speed = SPEED_10;
1895			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896				bp->req_duplex = DUPLEX_HALF;
1897		}
1898		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899			bp->req_line_speed = SPEED_100;
1900			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901				bp->req_duplex = DUPLEX_HALF;
1902		}
1903		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904			bp->req_line_speed = SPEED_1000;
1905		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906			bp->req_line_speed = SPEED_2500;
1907	}
1908}
1909
1910static void
1911bnx2_set_default_link(struct bnx2 *bp)
1912{
1913	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914		bnx2_set_default_remote_link(bp);
1915		return;
1916	}
1917
1918	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919	bp->req_line_speed = 0;
1920	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921		u32 reg;
1922
1923		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924
1925		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928			bp->autoneg = 0;
1929			bp->req_line_speed = bp->line_speed = SPEED_1000;
1930			bp->req_duplex = DUPLEX_FULL;
1931		}
1932	} else
1933		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934}
1935
1936static void
1937bnx2_send_heart_beat(struct bnx2 *bp)
1938{
1939	u32 msg;
1940	u32 addr;
1941
1942	spin_lock(&bp->indirect_lock);
1943	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947	spin_unlock(&bp->indirect_lock);
1948}
1949
1950static void
1951bnx2_remote_phy_event(struct bnx2 *bp)
1952{
1953	u32 msg;
1954	u8 link_up = bp->link_up;
1955	u8 old_port;
1956
1957	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1958
1959	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1960		bnx2_send_heart_beat(bp);
1961
1962	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1963
1964	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1965		bp->link_up = 0;
1966	else {
1967		u32 speed;
1968
1969		bp->link_up = 1;
1970		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1971		bp->duplex = DUPLEX_FULL;
1972		switch (speed) {
1973			case BNX2_LINK_STATUS_10HALF:
1974				bp->duplex = DUPLEX_HALF;
1975			case BNX2_LINK_STATUS_10FULL:
1976				bp->line_speed = SPEED_10;
1977				break;
1978			case BNX2_LINK_STATUS_100HALF:
1979				bp->duplex = DUPLEX_HALF;
1980			case BNX2_LINK_STATUS_100BASE_T4:
1981			case BNX2_LINK_STATUS_100FULL:
1982				bp->line_speed = SPEED_100;
1983				break;
1984			case BNX2_LINK_STATUS_1000HALF:
1985				bp->duplex = DUPLEX_HALF;
1986			case BNX2_LINK_STATUS_1000FULL:
1987				bp->line_speed = SPEED_1000;
1988				break;
1989			case BNX2_LINK_STATUS_2500HALF:
1990				bp->duplex = DUPLEX_HALF;
1991			case BNX2_LINK_STATUS_2500FULL:
1992				bp->line_speed = SPEED_2500;
1993				break;
1994			default:
1995				bp->line_speed = 0;
1996				break;
1997		}
1998
1999		bp->flow_ctrl = 0;
2000		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2001		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2002			if (bp->duplex == DUPLEX_FULL)
2003				bp->flow_ctrl = bp->req_flow_ctrl;
2004		} else {
2005			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2006				bp->flow_ctrl |= FLOW_CTRL_TX;
2007			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2008				bp->flow_ctrl |= FLOW_CTRL_RX;
2009		}
2010
2011		old_port = bp->phy_port;
2012		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2013			bp->phy_port = PORT_FIBRE;
2014		else
2015			bp->phy_port = PORT_TP;
2016
2017		if (old_port != bp->phy_port)
2018			bnx2_set_default_link(bp);
2019
2020	}
2021	if (bp->link_up != link_up)
2022		bnx2_report_link(bp);
2023
2024	bnx2_set_mac_link(bp);
2025}
2026
2027static int
2028bnx2_set_remote_link(struct bnx2 *bp)
2029{
2030	u32 evt_code;
2031
2032	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2033	switch (evt_code) {
2034		case BNX2_FW_EVT_CODE_LINK_EVENT:
2035			bnx2_remote_phy_event(bp);
2036			break;
2037		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038		default:
2039			bnx2_send_heart_beat(bp);
2040			break;
2041	}
2042	return 0;
2043}
2044
2045static int
2046bnx2_setup_copper_phy(struct bnx2 *bp)
2047__releases(&bp->phy_lock)
2048__acquires(&bp->phy_lock)
2049{
2050	u32 bmcr;
2051	u32 new_bmcr;
2052
2053	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2054
2055	if (bp->autoneg & AUTONEG_SPEED) {
2056		u32 adv_reg, adv1000_reg;
2057		u32 new_adv = 0;
2058		u32 new_adv1000 = 0;
2059
2060		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2061		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2062			ADVERTISE_PAUSE_ASYM);
2063
2064		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2065		adv1000_reg &= PHY_ALL_1000_SPEED;
2066
2067		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2068		new_adv |= ADVERTISE_CSMA;
2069		new_adv |= bnx2_phy_get_pause_adv(bp);
2070
2071		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2072
2073		if ((adv1000_reg != new_adv1000) ||
2074			(adv_reg != new_adv) ||
2075			((bmcr & BMCR_ANENABLE) == 0)) {
2076
2077			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2078			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2079			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2080				BMCR_ANENABLE);
2081		}
2082		else if (bp->link_up) {
2083			/* Flow ctrl may have changed from auto to forced */
2084			/* or vice-versa. */
2085
2086			bnx2_resolve_flow_ctrl(bp);
2087			bnx2_set_mac_link(bp);
2088		}
2089		return 0;
2090	}
2091
2092	new_bmcr = 0;
2093	if (bp->req_line_speed == SPEED_100) {
2094		new_bmcr |= BMCR_SPEED100;
2095	}
2096	if (bp->req_duplex == DUPLEX_FULL) {
2097		new_bmcr |= BMCR_FULLDPLX;
2098	}
2099	if (new_bmcr != bmcr) {
2100		u32 bmsr;
2101
2102		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2103		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2104
2105		if (bmsr & BMSR_LSTATUS) {
2106			/* Force link down */
2107			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2108			spin_unlock_bh(&bp->phy_lock);
2109			msleep(50);
2110			spin_lock_bh(&bp->phy_lock);
2111
2112			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2113			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2114		}
2115
2116		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2117
2118		/* Normally, the new speed is setup after the link has
2119		 * gone down and up again. In some cases, link will not go
2120		 * down so we need to set up the new speed here.
2121		 */
2122		if (bmsr & BMSR_LSTATUS) {
2123			bp->line_speed = bp->req_line_speed;
2124			bp->duplex = bp->req_duplex;
2125			bnx2_resolve_flow_ctrl(bp);
2126			bnx2_set_mac_link(bp);
2127		}
2128	} else {
2129		bnx2_resolve_flow_ctrl(bp);
2130		bnx2_set_mac_link(bp);
2131	}
2132	return 0;
2133}
2134
2135static int
2136bnx2_setup_phy(struct bnx2 *bp, u8 port)
2137__releases(&bp->phy_lock)
2138__acquires(&bp->phy_lock)
2139{
2140	if (bp->loopback == MAC_LOOPBACK)
2141		return 0;
2142
2143	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2144		return bnx2_setup_serdes_phy(bp, port);
2145	}
2146	else {
2147		return bnx2_setup_copper_phy(bp);
2148	}
2149}
2150
2151static int
2152bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2153{
2154	u32 val;
2155
2156	bp->mii_bmcr = MII_BMCR + 0x10;
2157	bp->mii_bmsr = MII_BMSR + 0x10;
2158	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2159	bp->mii_adv = MII_ADVERTISE + 0x10;
2160	bp->mii_lpa = MII_LPA + 0x10;
2161	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2162
2163	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2164	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2165
2166	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2167	if (reset_phy)
2168		bnx2_reset_phy(bp);
2169
2170	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2171
2172	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2173	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2174	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2175	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2176
2177	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2178	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2179	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2180		val |= BCM5708S_UP1_2G5;
2181	else
2182		val &= ~BCM5708S_UP1_2G5;
2183	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2184
2185	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2186	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2187	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2188	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2189
2190	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2191
2192	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2193	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2194	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2195
2196	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2197
2198	return 0;
2199}
2200
2201static int
2202bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2203{
2204	u32 val;
2205
2206	if (reset_phy)
2207		bnx2_reset_phy(bp);
2208
2209	bp->mii_up1 = BCM5708S_UP1;
2210
2211	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2212	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2213	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2214
2215	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2216	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2217	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2218
2219	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2220	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2221	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2222
2223	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2224		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2225		val |= BCM5708S_UP1_2G5;
2226		bnx2_write_phy(bp, BCM5708S_UP1, val);
2227	}
2228
2229	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2230	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2231	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2232		/* increase tx signal amplitude */
2233		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2234			       BCM5708S_BLK_ADDR_TX_MISC);
2235		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2236		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2237		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2238		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2239	}
2240
2241	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2242	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2243
2244	if (val) {
2245		u32 is_backplane;
2246
2247		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2248		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2249			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2250				       BCM5708S_BLK_ADDR_TX_MISC);
2251			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2252			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2253				       BCM5708S_BLK_ADDR_DIG);
2254		}
2255	}
2256	return 0;
2257}
2258
2259static int
2260bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2261{
2262	if (reset_phy)
2263		bnx2_reset_phy(bp);
2264
2265	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2266
2267	if (CHIP_NUM(bp) == CHIP_NUM_5706)
2268        	REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2269
2270	if (bp->dev->mtu > 1500) {
2271		u32 val;
2272
2273		/* Set extended packet length bit */
2274		bnx2_write_phy(bp, 0x18, 0x7);
2275		bnx2_read_phy(bp, 0x18, &val);
2276		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2277
2278		bnx2_write_phy(bp, 0x1c, 0x6c00);
2279		bnx2_read_phy(bp, 0x1c, &val);
2280		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2281	}
2282	else {
2283		u32 val;
2284
2285		bnx2_write_phy(bp, 0x18, 0x7);
2286		bnx2_read_phy(bp, 0x18, &val);
2287		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2288
2289		bnx2_write_phy(bp, 0x1c, 0x6c00);
2290		bnx2_read_phy(bp, 0x1c, &val);
2291		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2292	}
2293
2294	return 0;
2295}
2296
2297static int
2298bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2299{
2300	u32 val;
2301
2302	if (reset_phy)
2303		bnx2_reset_phy(bp);
2304
2305	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2306		bnx2_write_phy(bp, 0x18, 0x0c00);
2307		bnx2_write_phy(bp, 0x17, 0x000a);
2308		bnx2_write_phy(bp, 0x15, 0x310b);
2309		bnx2_write_phy(bp, 0x17, 0x201f);
2310		bnx2_write_phy(bp, 0x15, 0x9506);
2311		bnx2_write_phy(bp, 0x17, 0x401f);
2312		bnx2_write_phy(bp, 0x15, 0x14e2);
2313		bnx2_write_phy(bp, 0x18, 0x0400);
2314	}
2315
2316	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2317		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2318			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2319		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2320		val &= ~(1 << 8);
2321		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2322	}
2323
2324	if (bp->dev->mtu > 1500) {
2325		/* Set extended packet length bit */
2326		bnx2_write_phy(bp, 0x18, 0x7);
2327		bnx2_read_phy(bp, 0x18, &val);
2328		bnx2_write_phy(bp, 0x18, val | 0x4000);
2329
2330		bnx2_read_phy(bp, 0x10, &val);
2331		bnx2_write_phy(bp, 0x10, val | 0x1);
2332	}
2333	else {
2334		bnx2_write_phy(bp, 0x18, 0x7);
2335		bnx2_read_phy(bp, 0x18, &val);
2336		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2337
2338		bnx2_read_phy(bp, 0x10, &val);
2339		bnx2_write_phy(bp, 0x10, val & ~0x1);
2340	}
2341
2342	/* ethernet@wirespeed */
2343	bnx2_write_phy(bp, 0x18, 0x7007);
2344	bnx2_read_phy(bp, 0x18, &val);
2345	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2346	return 0;
2347}
2348
2349
2350static int
2351bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2352__releases(&bp->phy_lock)
2353__acquires(&bp->phy_lock)
2354{
2355	u32 val;
2356	int rc = 0;
2357
2358	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2359	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2360
2361	bp->mii_bmcr = MII_BMCR;
2362	bp->mii_bmsr = MII_BMSR;
2363	bp->mii_bmsr1 = MII_BMSR;
2364	bp->mii_adv = MII_ADVERTISE;
2365	bp->mii_lpa = MII_LPA;
2366
2367        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368
2369	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2370		goto setup_phy;
2371
2372	bnx2_read_phy(bp, MII_PHYSID1, &val);
2373	bp->phy_id = val << 16;
2374	bnx2_read_phy(bp, MII_PHYSID2, &val);
2375	bp->phy_id |= val & 0xffff;
2376
2377	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2378		if (CHIP_NUM(bp) == CHIP_NUM_5706)
2379			rc = bnx2_init_5706s_phy(bp, reset_phy);
2380		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2381			rc = bnx2_init_5708s_phy(bp, reset_phy);
2382		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2383			rc = bnx2_init_5709s_phy(bp, reset_phy);
2384	}
2385	else {
2386		rc = bnx2_init_copper_phy(bp, reset_phy);
2387	}
2388
2389setup_phy:
2390	if (!rc)
2391		rc = bnx2_setup_phy(bp, bp->phy_port);
2392
2393	return rc;
2394}
2395
2396static int
2397bnx2_set_mac_loopback(struct bnx2 *bp)
2398{
2399	u32 mac_mode;
2400
2401	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2402	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2403	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2404	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2405	bp->link_up = 1;
2406	return 0;
2407}
2408
2409static int bnx2_test_link(struct bnx2 *);
2410
2411static int
2412bnx2_set_phy_loopback(struct bnx2 *bp)
2413{
2414	u32 mac_mode;
2415	int rc, i;
2416
2417	spin_lock_bh(&bp->phy_lock);
2418	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2419			    BMCR_SPEED1000);
2420	spin_unlock_bh(&bp->phy_lock);
2421	if (rc)
2422		return rc;
2423
2424	for (i = 0; i < 10; i++) {
2425		if (bnx2_test_link(bp) == 0)
2426			break;
2427		msleep(100);
2428	}
2429
2430	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2432		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2433		      BNX2_EMAC_MODE_25G_MODE);
2434
2435	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2436	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2437	bp->link_up = 1;
2438	return 0;
2439}
2440
2441static void
2442bnx2_dump_mcp_state(struct bnx2 *bp)
2443{
2444	struct net_device *dev = bp->dev;
2445	u32 mcp_p0, mcp_p1;
2446
2447	netdev_err(dev, "<--- start MCP states dump --->\n");
2448	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2449		mcp_p0 = BNX2_MCP_STATE_P0;
2450		mcp_p1 = BNX2_MCP_STATE_P1;
2451	} else {
2452		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2453		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2454	}
2455	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2456		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2457	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2458		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2459		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2460		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2461	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2462		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2463		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2464		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2465	netdev_err(dev, "DEBUG: shmem states:\n");
2466	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2467		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2468		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2469		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2470	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2471	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2472		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2473		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2474	pr_cont(" condition[%08x]\n",
2475		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2476	DP_SHMEM_LINE(bp, 0x3cc);
2477	DP_SHMEM_LINE(bp, 0x3dc);
2478	DP_SHMEM_LINE(bp, 0x3ec);
2479	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2480	netdev_err(dev, "<--- end MCP states dump --->\n");
2481}
2482
2483static int
2484bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2485{
2486	int i;
2487	u32 val;
2488
2489	bp->fw_wr_seq++;
2490	msg_data |= bp->fw_wr_seq;
2491
2492	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2493
2494	if (!ack)
2495		return 0;
2496
2497	/* wait for an acknowledgement. */
2498	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2499		msleep(10);
2500
2501		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2502
2503		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2504			break;
2505	}
2506	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2507		return 0;
2508
2509	/* If we timed out, inform the firmware that this is the case. */
2510	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2511		msg_data &= ~BNX2_DRV_MSG_CODE;
2512		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2513
2514		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2515		if (!silent) {
2516			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2517			bnx2_dump_mcp_state(bp);
2518		}
2519
2520		return -EBUSY;
2521	}
2522
2523	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2524		return -EIO;
2525
2526	return 0;
2527}
2528
2529static int
2530bnx2_init_5709_context(struct bnx2 *bp)
2531{
2532	int i, ret = 0;
2533	u32 val;
2534
2535	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2536	val |= (BCM_PAGE_BITS - 8) << 16;
2537	REG_WR(bp, BNX2_CTX_COMMAND, val);
2538	for (i = 0; i < 10; i++) {
2539		val = REG_RD(bp, BNX2_CTX_COMMAND);
2540		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2541			break;
2542		udelay(2);
2543	}
2544	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2545		return -EBUSY;
2546
2547	for (i = 0; i < bp->ctx_pages; i++) {
2548		int j;
2549
2550		if (bp->ctx_blk[i])
2551			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2552		else
2553			return -ENOMEM;
2554
2555		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2556		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
2557		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2558		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2559		       (u64) bp->ctx_blk_mapping[i] >> 32);
2560		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2561		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2562		for (j = 0; j < 10; j++) {
2563
2564			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2565			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2566				break;
2567			udelay(5);
2568		}
2569		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2570			ret = -EBUSY;
2571			break;
2572		}
2573	}
2574	return ret;
2575}
2576
2577static void
2578bnx2_init_context(struct bnx2 *bp)
2579{
2580	u32 vcid;
2581
2582	vcid = 96;
2583	while (vcid) {
2584		u32 vcid_addr, pcid_addr, offset;
2585		int i;
2586
2587		vcid--;
2588
2589		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2590			u32 new_vcid;
2591
2592			vcid_addr = GET_PCID_ADDR(vcid);
2593			if (vcid & 0x8) {
2594				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2595			}
2596			else {
2597				new_vcid = vcid;
2598			}
2599			pcid_addr = GET_PCID_ADDR(new_vcid);
2600		}
2601		else {
2602	    		vcid_addr = GET_CID_ADDR(vcid);
2603			pcid_addr = vcid_addr;
2604		}
2605
2606		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2607			vcid_addr += (i << PHY_CTX_SHIFT);
2608			pcid_addr += (i << PHY_CTX_SHIFT);
2609
2610			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2611			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2612
2613			/* Zero out the context. */
2614			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2615				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2616		}
2617	}
2618}
2619
2620static int
2621bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2622{
2623	u16 *good_mbuf;
2624	u32 good_mbuf_cnt;
2625	u32 val;
2626
2627	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2628	if (good_mbuf == NULL)
2629		return -ENOMEM;
2630
2631	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2632		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2633
2634	good_mbuf_cnt = 0;
2635
2636	/* Allocate a bunch of mbufs and save the good ones in an array. */
2637	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2638	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2639		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2640				BNX2_RBUF_COMMAND_ALLOC_REQ);
2641
2642		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2643
2644		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2645
2646		/* The addresses with Bit 9 set are bad memory blocks. */
2647		if (!(val & (1 << 9))) {
2648			good_mbuf[good_mbuf_cnt] = (u16) val;
2649			good_mbuf_cnt++;
2650		}
2651
2652		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2653	}
2654
2655	/* Free the good ones back to the mbuf pool thus discarding
2656	 * all the bad ones. */
2657	while (good_mbuf_cnt) {
2658		good_mbuf_cnt--;
2659
2660		val = good_mbuf[good_mbuf_cnt];
2661		val = (val << 9) | val | 1;
2662
2663		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2664	}
2665	kfree(good_mbuf);
2666	return 0;
2667}
2668
2669static void
2670bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2671{
2672	u32 val;
2673
2674	val = (mac_addr[0] << 8) | mac_addr[1];
2675
2676	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2677
2678	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2679		(mac_addr[4] << 8) | mac_addr[5];
2680
2681	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2682}
2683
2684static inline int
2685bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2686{
2687	dma_addr_t mapping;
2688	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2689	struct rx_bd *rxbd =
2690		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2691	struct page *page = alloc_page(gfp);
2692
2693	if (!page)
2694		return -ENOMEM;
2695	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2696			       PCI_DMA_FROMDEVICE);
2697	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2698		__free_page(page);
2699		return -EIO;
2700	}
2701
2702	rx_pg->page = page;
2703	dma_unmap_addr_set(rx_pg, mapping, mapping);
2704	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2705	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2706	return 0;
2707}
2708
2709static void
2710bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2711{
2712	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2713	struct page *page = rx_pg->page;
2714
2715	if (!page)
2716		return;
2717
2718	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2719		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2720
2721	__free_page(page);
2722	rx_pg->page = NULL;
2723}
2724
2725static inline int
2726bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2727{
2728	u8 *data;
2729	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2730	dma_addr_t mapping;
2731	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2732
2733	data = kmalloc(bp->rx_buf_size, gfp);
2734	if (!data)
2735		return -ENOMEM;
2736
2737	mapping = dma_map_single(&bp->pdev->dev,
2738				 get_l2_fhdr(data),
2739				 bp->rx_buf_use_size,
2740				 PCI_DMA_FROMDEVICE);
2741	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2742		kfree(data);
2743		return -EIO;
2744	}
2745
2746	rx_buf->data = data;
2747	dma_unmap_addr_set(rx_buf, mapping, mapping);
2748
2749	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2750	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2751
2752	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2753
2754	return 0;
2755}
2756
2757static int
2758bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2759{
2760	struct status_block *sblk = bnapi->status_blk.msi;
2761	u32 new_link_state, old_link_state;
2762	int is_set = 1;
2763
2764	new_link_state = sblk->status_attn_bits & event;
2765	old_link_state = sblk->status_attn_bits_ack & event;
2766	if (new_link_state != old_link_state) {
2767		if (new_link_state)
2768			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2769		else
2770			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2771	} else
2772		is_set = 0;
2773
2774	return is_set;
2775}
2776
2777static void
2778bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2779{
2780	spin_lock(&bp->phy_lock);
2781
2782	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2783		bnx2_set_link(bp);
2784	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2785		bnx2_set_remote_link(bp);
2786
2787	spin_unlock(&bp->phy_lock);
2788
2789}
2790
2791static inline u16
2792bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2793{
2794	u16 cons;
2795
2796	/* Tell compiler that status block fields can change. */
2797	barrier();
2798	cons = *bnapi->hw_tx_cons_ptr;
2799	barrier();
2800	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2801		cons++;
2802	return cons;
2803}
2804
2805static int
2806bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2807{
2808	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2809	u16 hw_cons, sw_cons, sw_ring_cons;
2810	int tx_pkt = 0, index;
2811	unsigned int tx_bytes = 0;
2812	struct netdev_queue *txq;
2813
2814	index = (bnapi - bp->bnx2_napi);
2815	txq = netdev_get_tx_queue(bp->dev, index);
2816
2817	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2818	sw_cons = txr->tx_cons;
2819
2820	while (sw_cons != hw_cons) {
2821		struct sw_tx_bd *tx_buf;
2822		struct sk_buff *skb;
2823		int i, last;
2824
2825		sw_ring_cons = TX_RING_IDX(sw_cons);
2826
2827		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2828		skb = tx_buf->skb;
2829
2830		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2831		prefetch(&skb->end);
2832
2833		/* partial BD completions possible with TSO packets */
2834		if (tx_buf->is_gso) {
2835			u16 last_idx, last_ring_idx;
2836
2837			last_idx = sw_cons + tx_buf->nr_frags + 1;
2838			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2839			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2840				last_idx++;
2841			}
2842			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2843				break;
2844			}
2845		}
2846
2847		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2848			skb_headlen(skb), PCI_DMA_TODEVICE);
2849
2850		tx_buf->skb = NULL;
2851		last = tx_buf->nr_frags;
2852
2853		for (i = 0; i < last; i++) {
2854			sw_cons = NEXT_TX_BD(sw_cons);
2855
2856			dma_unmap_page(&bp->pdev->dev,
2857				dma_unmap_addr(
2858					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2859					mapping),
2860				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2861				PCI_DMA_TODEVICE);
2862		}
2863
2864		sw_cons = NEXT_TX_BD(sw_cons);
2865
2866		tx_bytes += skb->len;
2867		dev_kfree_skb(skb);
2868		tx_pkt++;
2869		if (tx_pkt == budget)
2870			break;
2871
2872		if (hw_cons == sw_cons)
2873			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2874	}
2875
2876	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2877	txr->hw_tx_cons = hw_cons;
2878	txr->tx_cons = sw_cons;
2879
2880	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2881	 * before checking for netif_tx_queue_stopped().  Without the
2882	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2883	 * will miss it and cause the queue to be stopped forever.
2884	 */
2885	smp_mb();
2886
2887	if (unlikely(netif_tx_queue_stopped(txq)) &&
2888		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2889		__netif_tx_lock(txq, smp_processor_id());
2890		if ((netif_tx_queue_stopped(txq)) &&
2891		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2892			netif_tx_wake_queue(txq);
2893		__netif_tx_unlock(txq);
2894	}
2895
2896	return tx_pkt;
2897}
2898
2899static void
2900bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2901			struct sk_buff *skb, int count)
2902{
2903	struct sw_pg *cons_rx_pg, *prod_rx_pg;
2904	struct rx_bd *cons_bd, *prod_bd;
2905	int i;
2906	u16 hw_prod, prod;
2907	u16 cons = rxr->rx_pg_cons;
2908
2909	cons_rx_pg = &rxr->rx_pg_ring[cons];
2910
2911	/* The caller was unable to allocate a new page to replace the
2912	 * last one in the frags array, so we need to recycle that page
2913	 * and then free the skb.
2914	 */
2915	if (skb) {
2916		struct page *page;
2917		struct skb_shared_info *shinfo;
2918
2919		shinfo = skb_shinfo(skb);
2920		shinfo->nr_frags--;
2921		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2922		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2923
2924		cons_rx_pg->page = page;
2925		dev_kfree_skb(skb);
2926	}
2927
2928	hw_prod = rxr->rx_pg_prod;
2929
2930	for (i = 0; i < count; i++) {
2931		prod = RX_PG_RING_IDX(hw_prod);
2932
2933		prod_rx_pg = &rxr->rx_pg_ring[prod];
2934		cons_rx_pg = &rxr->rx_pg_ring[cons];
2935		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2936		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2937
2938		if (prod != cons) {
2939			prod_rx_pg->page = cons_rx_pg->page;
2940			cons_rx_pg->page = NULL;
2941			dma_unmap_addr_set(prod_rx_pg, mapping,
2942				dma_unmap_addr(cons_rx_pg, mapping));
2943
2944			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2945			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2946
2947		}
2948		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2949		hw_prod = NEXT_RX_BD(hw_prod);
2950	}
2951	rxr->rx_pg_prod = hw_prod;
2952	rxr->rx_pg_cons = cons;
2953}
2954
2955static inline void
2956bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2957		   u8 *data, u16 cons, u16 prod)
2958{
2959	struct sw_bd *cons_rx_buf, *prod_rx_buf;
2960	struct rx_bd *cons_bd, *prod_bd;
2961
2962	cons_rx_buf = &rxr->rx_buf_ring[cons];
2963	prod_rx_buf = &rxr->rx_buf_ring[prod];
2964
2965	dma_sync_single_for_device(&bp->pdev->dev,
2966		dma_unmap_addr(cons_rx_buf, mapping),
2967		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2968
2969	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2970
2971	prod_rx_buf->data = data;
2972
2973	if (cons == prod)
2974		return;
2975
2976	dma_unmap_addr_set(prod_rx_buf, mapping,
2977			dma_unmap_addr(cons_rx_buf, mapping));
2978
2979	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2980	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2981	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2982	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2983}
2984
2985static struct sk_buff *
2986bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2987	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2988	    u32 ring_idx)
2989{
2990	int err;
2991	u16 prod = ring_idx & 0xffff;
2992	struct sk_buff *skb;
2993
2994	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
2995	if (unlikely(err)) {
2996		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
2997error:
2998		if (hdr_len) {
2999			unsigned int raw_len = len + 4;
3000			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3001
3002			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3003		}
3004		return NULL;
3005	}
3006
3007	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3008			 PCI_DMA_FROMDEVICE);
3009	skb = build_skb(data);
3010	if (!skb) {
3011		kfree(data);
3012		goto error;
3013	}
3014	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3015	if (hdr_len == 0) {
3016		skb_put(skb, len);
3017		return skb;
3018	} else {
3019		unsigned int i, frag_len, frag_size, pages;
3020		struct sw_pg *rx_pg;
3021		u16 pg_cons = rxr->rx_pg_cons;
3022		u16 pg_prod = rxr->rx_pg_prod;
3023
3024		frag_size = len + 4 - hdr_len;
3025		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3026		skb_put(skb, hdr_len);
3027
3028		for (i = 0; i < pages; i++) {
3029			dma_addr_t mapping_old;
3030
3031			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3032			if (unlikely(frag_len <= 4)) {
3033				unsigned int tail = 4 - frag_len;
3034
3035				rxr->rx_pg_cons = pg_cons;
3036				rxr->rx_pg_prod = pg_prod;
3037				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3038							pages - i);
3039				skb->len -= tail;
3040				if (i == 0) {
3041					skb->tail -= tail;
3042				} else {
3043					skb_frag_t *frag =
3044						&skb_shinfo(skb)->frags[i - 1];
3045					skb_frag_size_sub(frag, tail);
3046					skb->data_len -= tail;
3047				}
3048				return skb;
3049			}
3050			rx_pg = &rxr->rx_pg_ring[pg_cons];
3051
3052			/* Don't unmap yet.  If we're unable to allocate a new
3053			 * page, we need to recycle the page and the DMA addr.
3054			 */
3055			mapping_old = dma_unmap_addr(rx_pg, mapping);
3056			if (i == pages - 1)
3057				frag_len -= 4;
3058
3059			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3060			rx_pg->page = NULL;
3061
3062			err = bnx2_alloc_rx_page(bp, rxr,
3063						 RX_PG_RING_IDX(pg_prod),
3064						 GFP_ATOMIC);
3065			if (unlikely(err)) {
3066				rxr->rx_pg_cons = pg_cons;
3067				rxr->rx_pg_prod = pg_prod;
3068				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3069							pages - i);
3070				return NULL;
3071			}
3072
3073			dma_unmap_page(&bp->pdev->dev, mapping_old,
3074				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3075
3076			frag_size -= frag_len;
3077			skb->data_len += frag_len;
3078			skb->truesize += PAGE_SIZE;
3079			skb->len += frag_len;
3080
3081			pg_prod = NEXT_RX_BD(pg_prod);
3082			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3083		}
3084		rxr->rx_pg_prod = pg_prod;
3085		rxr->rx_pg_cons = pg_cons;
3086	}
3087	return skb;
3088}
3089
3090static inline u16
3091bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3092{
3093	u16 cons;
3094
3095	/* Tell compiler that status block fields can change. */
3096	barrier();
3097	cons = *bnapi->hw_rx_cons_ptr;
3098	barrier();
3099	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3100		cons++;
3101	return cons;
3102}
3103
3104static int
3105bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3106{
3107	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3108	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3109	struct l2_fhdr *rx_hdr;
3110	int rx_pkt = 0, pg_ring_used = 0;
3111
3112	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3113	sw_cons = rxr->rx_cons;
3114	sw_prod = rxr->rx_prod;
3115
3116	/* Memory barrier necessary as speculative reads of the rx
3117	 * buffer can be ahead of the index in the status block
3118	 */
3119	rmb();
3120	while (sw_cons != hw_cons) {
3121		unsigned int len, hdr_len;
3122		u32 status;
3123		struct sw_bd *rx_buf, *next_rx_buf;
3124		struct sk_buff *skb;
3125		dma_addr_t dma_addr;
3126		u8 *data;
3127
3128		sw_ring_cons = RX_RING_IDX(sw_cons);
3129		sw_ring_prod = RX_RING_IDX(sw_prod);
3130
3131		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3132		data = rx_buf->data;
3133		rx_buf->data = NULL;
3134
3135		rx_hdr = get_l2_fhdr(data);
3136		prefetch(rx_hdr);
3137
3138		dma_addr = dma_unmap_addr(rx_buf, mapping);
3139
3140		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3141			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3142			PCI_DMA_FROMDEVICE);
3143
3144		next_rx_buf =
3145			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3146		prefetch(get_l2_fhdr(next_rx_buf->data));
3147
3148		len = rx_hdr->l2_fhdr_pkt_len;
3149		status = rx_hdr->l2_fhdr_status;
3150
3151		hdr_len = 0;
3152		if (status & L2_FHDR_STATUS_SPLIT) {
3153			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3154			pg_ring_used = 1;
3155		} else if (len > bp->rx_jumbo_thresh) {
3156			hdr_len = bp->rx_jumbo_thresh;
3157			pg_ring_used = 1;
3158		}
3159
3160		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3161				       L2_FHDR_ERRORS_PHY_DECODE |
3162				       L2_FHDR_ERRORS_ALIGNMENT |
3163				       L2_FHDR_ERRORS_TOO_SHORT |
3164				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3165
3166			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3167					  sw_ring_prod);
3168			if (pg_ring_used) {
3169				int pages;
3170
3171				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3172
3173				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3174			}
3175			goto next_rx;
3176		}
3177
3178		len -= 4;
3179
3180		if (len <= bp->rx_copy_thresh) {
3181			skb = netdev_alloc_skb(bp->dev, len + 6);
3182			if (skb == NULL) {
3183				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3184						  sw_ring_prod);
3185				goto next_rx;
3186			}
3187
3188			/* aligned copy */
3189			memcpy(skb->data,
3190			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3191			       len + 6);
3192			skb_reserve(skb, 6);
3193			skb_put(skb, len);
3194
3195			bnx2_reuse_rx_data(bp, rxr, data,
3196				sw_ring_cons, sw_ring_prod);
3197
3198		} else {
3199			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3200					  (sw_ring_cons << 16) | sw_ring_prod);
3201			if (!skb)
3202				goto next_rx;
3203		}
3204		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3205		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3206			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3207
3208		skb->protocol = eth_type_trans(skb, bp->dev);
3209
3210		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3211			(ntohs(skb->protocol) != 0x8100)) {
3212
3213			dev_kfree_skb(skb);
3214			goto next_rx;
3215
3216		}
3217
3218		skb_checksum_none_assert(skb);
3219		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3220			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3221			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3222
3223			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3224					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3225				skb->ip_summed = CHECKSUM_UNNECESSARY;
3226		}
3227		if ((bp->dev->features & NETIF_F_RXHASH) &&
3228		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3229		     L2_FHDR_STATUS_USE_RXHASH))
3230			skb->rxhash = rx_hdr->l2_fhdr_hash;
3231
3232		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3233		napi_gro_receive(&bnapi->napi, skb);
3234		rx_pkt++;
3235
3236next_rx:
3237		sw_cons = NEXT_RX_BD(sw_cons);
3238		sw_prod = NEXT_RX_BD(sw_prod);
3239
3240		if ((rx_pkt == budget))
3241			break;
3242
3243		/* Refresh hw_cons to see if there is new work */
3244		if (sw_cons == hw_cons) {
3245			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3246			rmb();
3247		}
3248	}
3249	rxr->rx_cons = sw_cons;
3250	rxr->rx_prod = sw_prod;
3251
3252	if (pg_ring_used)
3253		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3254
3255	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3256
3257	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3258
3259	mmiowb();
3260
3261	return rx_pkt;
3262
3263}
3264
3265/* MSI ISR - The only difference between this and the INTx ISR
3266 * is that the MSI interrupt is always serviced.
3267 */
3268static irqreturn_t
3269bnx2_msi(int irq, void *dev_instance)
3270{
3271	struct bnx2_napi *bnapi = dev_instance;
3272	struct bnx2 *bp = bnapi->bp;
3273
3274	prefetch(bnapi->status_blk.msi);
3275	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3276		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3277		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3278
3279	/* Return here if interrupt is disabled. */
3280	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3281		return IRQ_HANDLED;
3282
3283	napi_schedule(&bnapi->napi);
3284
3285	return IRQ_HANDLED;
3286}
3287
3288static irqreturn_t
3289bnx2_msi_1shot(int irq, void *dev_instance)
3290{
3291	struct bnx2_napi *bnapi = dev_instance;
3292	struct bnx2 *bp = bnapi->bp;
3293
3294	prefetch(bnapi->status_blk.msi);
3295
3296	/* Return here if interrupt is disabled. */
3297	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3298		return IRQ_HANDLED;
3299
3300	napi_schedule(&bnapi->napi);
3301
3302	return IRQ_HANDLED;
3303}
3304
3305static irqreturn_t
3306bnx2_interrupt(int irq, void *dev_instance)
3307{
3308	struct bnx2_napi *bnapi = dev_instance;
3309	struct bnx2 *bp = bnapi->bp;
3310	struct status_block *sblk = bnapi->status_blk.msi;
3311
3312	/* When using INTx, it is possible for the interrupt to arrive
3313	 * at the CPU before the status block posted prior to the
3314	 * interrupt. Reading a register will flush the status block.
3315	 * When using MSI, the MSI message will always complete after
3316	 * the status block write.
3317	 */
3318	if ((sblk->status_idx == bnapi->last_status_idx) &&
3319	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3320	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3321		return IRQ_NONE;
3322
3323	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3324		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3325		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3326
3327	/* Read back to deassert IRQ immediately to avoid too many
3328	 * spurious interrupts.
3329	 */
3330	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3331
3332	/* Return here if interrupt is shared and is disabled. */
3333	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3334		return IRQ_HANDLED;
3335
3336	if (napi_schedule_prep(&bnapi->napi)) {
3337		bnapi->last_status_idx = sblk->status_idx;
3338		__napi_schedule(&bnapi->napi);
3339	}
3340
3341	return IRQ_HANDLED;
3342}
3343
3344static inline int
3345bnx2_has_fast_work(struct bnx2_napi *bnapi)
3346{
3347	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3348	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3349
3350	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3351	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3352		return 1;
3353	return 0;
3354}
3355
3356#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3357				 STATUS_ATTN_BITS_TIMER_ABORT)
3358
3359static inline int
3360bnx2_has_work(struct bnx2_napi *bnapi)
3361{
3362	struct status_block *sblk = bnapi->status_blk.msi;
3363
3364	if (bnx2_has_fast_work(bnapi))
3365		return 1;
3366
3367#ifdef BCM_CNIC
3368	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3369		return 1;
3370#endif
3371
3372	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3373	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3374		return 1;
3375
3376	return 0;
3377}
3378
3379static void
3380bnx2_chk_missed_msi(struct bnx2 *bp)
3381{
3382	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3383	u32 msi_ctrl;
3384
3385	if (bnx2_has_work(bnapi)) {
3386		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3387		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3388			return;
3389
3390		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3391			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3392			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3393			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3394			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3395		}
3396	}
3397
3398	bp->idle_chk_status_idx = bnapi->last_status_idx;
3399}
3400
3401#ifdef BCM_CNIC
3402static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3403{
3404	struct cnic_ops *c_ops;
3405
3406	if (!bnapi->cnic_present)
3407		return;
3408
3409	rcu_read_lock();
3410	c_ops = rcu_dereference(bp->cnic_ops);
3411	if (c_ops)
3412		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3413						      bnapi->status_blk.msi);
3414	rcu_read_unlock();
3415}
3416#endif
3417
3418static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3419{
3420	struct status_block *sblk = bnapi->status_blk.msi;
3421	u32 status_attn_bits = sblk->status_attn_bits;
3422	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3423
3424	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3425	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3426
3427		bnx2_phy_int(bp, bnapi);
3428
3429		/* This is needed to take care of transient status
3430		 * during link changes.
3431		 */
3432		REG_WR(bp, BNX2_HC_COMMAND,
3433		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3434		REG_RD(bp, BNX2_HC_COMMAND);
3435	}
3436}
3437
3438static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3439			  int work_done, int budget)
3440{
3441	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3442	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3443
3444	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3445		bnx2_tx_int(bp, bnapi, 0);
3446
3447	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3448		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3449
3450	return work_done;
3451}
3452
3453static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3454{
3455	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3456	struct bnx2 *bp = bnapi->bp;
3457	int work_done = 0;
3458	struct status_block_msix *sblk = bnapi->status_blk.msix;
3459
3460	while (1) {
3461		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3462		if (unlikely(work_done >= budget))
3463			break;
3464
3465		bnapi->last_status_idx = sblk->status_idx;
3466		/* status idx must be read before checking for more work. */
3467		rmb();
3468		if (likely(!bnx2_has_fast_work(bnapi))) {
3469
3470			napi_complete(napi);
3471			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3472			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3473			       bnapi->last_status_idx);
3474			break;
3475		}
3476	}
3477	return work_done;
3478}
3479
3480static int bnx2_poll(struct napi_struct *napi, int budget)
3481{
3482	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3483	struct bnx2 *bp = bnapi->bp;
3484	int work_done = 0;
3485	struct status_block *sblk = bnapi->status_blk.msi;
3486
3487	while (1) {
3488		bnx2_poll_link(bp, bnapi);
3489
3490		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3491
3492#ifdef BCM_CNIC
3493		bnx2_poll_cnic(bp, bnapi);
3494#endif
3495
3496		/* bnapi->last_status_idx is used below to tell the hw how
3497		 * much work has been processed, so we must read it before
3498		 * checking for more work.
3499		 */
3500		bnapi->last_status_idx = sblk->status_idx;
3501
3502		if (unlikely(work_done >= budget))
3503			break;
3504
3505		rmb();
3506		if (likely(!bnx2_has_work(bnapi))) {
3507			napi_complete(napi);
3508			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3509				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3510				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3511				       bnapi->last_status_idx);
3512				break;
3513			}
3514			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3515			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3516			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3517			       bnapi->last_status_idx);
3518
3519			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3520			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3521			       bnapi->last_status_idx);
3522			break;
3523		}
3524	}
3525
3526	return work_done;
3527}
3528
3529/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3530 * from set_multicast.
3531 */
3532static void
3533bnx2_set_rx_mode(struct net_device *dev)
3534{
3535	struct bnx2 *bp = netdev_priv(dev);
3536	u32 rx_mode, sort_mode;
3537	struct netdev_hw_addr *ha;
3538	int i;
3539
3540	if (!netif_running(dev))
3541		return;
3542
3543	spin_lock_bh(&bp->phy_lock);
3544
3545	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3546				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3547	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3548	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3549	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3550		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3551	if (dev->flags & IFF_PROMISC) {
3552		/* Promiscuous mode. */
3553		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3554		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3555			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3556	}
3557	else if (dev->flags & IFF_ALLMULTI) {
3558		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3559			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3560			       0xffffffff);
3561        	}
3562		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3563	}
3564	else {
3565		/* Accept one or more multicast(s). */
3566		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3567		u32 regidx;
3568		u32 bit;
3569		u32 crc;
3570
3571		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3572
3573		netdev_for_each_mc_addr(ha, dev) {
3574			crc = ether_crc_le(ETH_ALEN, ha->addr);
3575			bit = crc & 0xff;
3576			regidx = (bit & 0xe0) >> 5;
3577			bit &= 0x1f;
3578			mc_filter[regidx] |= (1 << bit);
3579		}
3580
3581		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3582			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3583			       mc_filter[i]);
3584		}
3585
3586		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3587	}
3588
3589	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3590		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3591		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3592			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3593	} else if (!(dev->flags & IFF_PROMISC)) {
3594		/* Add all entries into to the match filter list */
3595		i = 0;
3596		netdev_for_each_uc_addr(ha, dev) {
3597			bnx2_set_mac_addr(bp, ha->addr,
3598					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3599			sort_mode |= (1 <<
3600				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3601			i++;
3602		}
3603
3604	}
3605
3606	if (rx_mode != bp->rx_mode) {
3607		bp->rx_mode = rx_mode;
3608		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3609	}
3610
3611	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3612	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3613	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3614
3615	spin_unlock_bh(&bp->phy_lock);
3616}
3617
3618static int
3619check_fw_section(const struct firmware *fw,
3620		 const struct bnx2_fw_file_section *section,
3621		 u32 alignment, bool non_empty)
3622{
3623	u32 offset = be32_to_cpu(section->offset);
3624	u32 len = be32_to_cpu(section->len);
3625
3626	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3627		return -EINVAL;
3628	if ((non_empty && len == 0) || len > fw->size - offset ||
3629	    len & (alignment - 1))
3630		return -EINVAL;
3631	return 0;
3632}
3633
3634static int
3635check_mips_fw_entry(const struct firmware *fw,
3636		    const struct bnx2_mips_fw_file_entry *entry)
3637{
3638	if (check_fw_section(fw, &entry->text, 4, true) ||
3639	    check_fw_section(fw, &entry->data, 4, false) ||
3640	    check_fw_section(fw, &entry->rodata, 4, false))
3641		return -EINVAL;
3642	return 0;
3643}
3644
3645static void bnx2_release_firmware(struct bnx2 *bp)
3646{
3647	if (bp->rv2p_firmware) {
3648		release_firmware(bp->mips_firmware);
3649		release_firmware(bp->rv2p_firmware);
3650		bp->rv2p_firmware = NULL;
3651	}
3652}
3653
3654static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3655{
3656	const char *mips_fw_file, *rv2p_fw_file;
3657	const struct bnx2_mips_fw_file *mips_fw;
3658	const struct bnx2_rv2p_fw_file *rv2p_fw;
3659	int rc;
3660
3661	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3662		mips_fw_file = FW_MIPS_FILE_09;
3663		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3664		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
3665			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3666		else
3667			rv2p_fw_file = FW_RV2P_FILE_09;
3668	} else {
3669		mips_fw_file = FW_MIPS_FILE_06;
3670		rv2p_fw_file = FW_RV2P_FILE_06;
3671	}
3672
3673	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3674	if (rc) {
3675		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3676		goto out;
3677	}
3678
3679	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3680	if (rc) {
3681		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3682		goto err_release_mips_firmware;
3683	}
3684	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3685	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3686	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3687	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3688	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3689	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3690	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3691	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3692		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3693		rc = -EINVAL;
3694		goto err_release_firmware;
3695	}
3696	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3697	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3698	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3699		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3700		rc = -EINVAL;
3701		goto err_release_firmware;
3702	}
3703out:
3704	return rc;
3705
3706err_release_firmware:
3707	release_firmware(bp->rv2p_firmware);
3708	bp->rv2p_firmware = NULL;
3709err_release_mips_firmware:
3710	release_firmware(bp->mips_firmware);
3711	goto out;
3712}
3713
3714static int bnx2_request_firmware(struct bnx2 *bp)
3715{
3716	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3717}
3718
3719static u32
3720rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3721{
3722	switch (idx) {
3723	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3724		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3725		rv2p_code |= RV2P_BD_PAGE_SIZE;
3726		break;
3727	}
3728	return rv2p_code;
3729}
3730
3731static int
3732load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3733	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3734{
3735	u32 rv2p_code_len, file_offset;
3736	__be32 *rv2p_code;
3737	int i;
3738	u32 val, cmd, addr;
3739
3740	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3741	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3742
3743	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3744
3745	if (rv2p_proc == RV2P_PROC1) {
3746		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3747		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3748	} else {
3749		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3750		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3751	}
3752
3753	for (i = 0; i < rv2p_code_len; i += 8) {
3754		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3755		rv2p_code++;
3756		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3757		rv2p_code++;
3758
3759		val = (i / 8) | cmd;
3760		REG_WR(bp, addr, val);
3761	}
3762
3763	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3764	for (i = 0; i < 8; i++) {
3765		u32 loc, code;
3766
3767		loc = be32_to_cpu(fw_entry->fixup[i]);
3768		if (loc && ((loc * 4) < rv2p_code_len)) {
3769			code = be32_to_cpu(*(rv2p_code + loc - 1));
3770			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3771			code = be32_to_cpu(*(rv2p_code + loc));
3772			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3773			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3774
3775			val = (loc / 2) | cmd;
3776			REG_WR(bp, addr, val);
3777		}
3778	}
3779
3780	/* Reset the processor, un-stall is done later. */
3781	if (rv2p_proc == RV2P_PROC1) {
3782		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3783	}
3784	else {
3785		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3786	}
3787
3788	return 0;
3789}
3790
3791static int
3792load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3793	    const struct bnx2_mips_fw_file_entry *fw_entry)
3794{
3795	u32 addr, len, file_offset;
3796	__be32 *data;
3797	u32 offset;
3798	u32 val;
3799
3800	/* Halt the CPU. */
3801	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3802	val |= cpu_reg->mode_value_halt;
3803	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3804	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3805
3806	/* Load the Text area. */
3807	addr = be32_to_cpu(fw_entry->text.addr);
3808	len = be32_to_cpu(fw_entry->text.len);
3809	file_offset = be32_to_cpu(fw_entry->text.offset);
3810	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3811
3812	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3813	if (len) {
3814		int j;
3815
3816		for (j = 0; j < (len / 4); j++, offset += 4)
3817			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3818	}
3819
3820	/* Load the Data area. */
3821	addr = be32_to_cpu(fw_entry->data.addr);
3822	len = be32_to_cpu(fw_entry->data.len);
3823	file_offset = be32_to_cpu(fw_entry->data.offset);
3824	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3825
3826	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3827	if (len) {
3828		int j;
3829
3830		for (j = 0; j < (len / 4); j++, offset += 4)
3831			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3832	}
3833
3834	/* Load the Read-Only area. */
3835	addr = be32_to_cpu(fw_entry->rodata.addr);
3836	len = be32_to_cpu(fw_entry->rodata.len);
3837	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3838	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3839
3840	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3841	if (len) {
3842		int j;
3843
3844		for (j = 0; j < (len / 4); j++, offset += 4)
3845			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3846	}
3847
3848	/* Clear the pre-fetch instruction. */
3849	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3850
3851	val = be32_to_cpu(fw_entry->start_addr);
3852	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3853
3854	/* Start the CPU. */
3855	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3856	val &= ~cpu_reg->mode_value_halt;
3857	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3858	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3859
3860	return 0;
3861}
3862
3863static int
3864bnx2_init_cpus(struct bnx2 *bp)
3865{
3866	const struct bnx2_mips_fw_file *mips_fw =
3867		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3868	const struct bnx2_rv2p_fw_file *rv2p_fw =
3869		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3870	int rc;
3871
3872	/* Initialize the RV2P processor. */
3873	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3874	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3875
3876	/* Initialize the RX Processor. */
3877	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3878	if (rc)
3879		goto init_cpu_err;
3880
3881	/* Initialize the TX Processor. */
3882	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3883	if (rc)
3884		goto init_cpu_err;
3885
3886	/* Initialize the TX Patch-up Processor. */
3887	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3888	if (rc)
3889		goto init_cpu_err;
3890
3891	/* Initialize the Completion Processor. */
3892	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3893	if (rc)
3894		goto init_cpu_err;
3895
3896	/* Initialize the Command Processor. */
3897	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3898
3899init_cpu_err:
3900	return rc;
3901}
3902
3903static int
3904bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3905{
3906	u16 pmcsr;
3907
3908	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3909
3910	switch (state) {
3911	case PCI_D0: {
3912		u32 val;
3913
3914		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3915			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3916			PCI_PM_CTRL_PME_STATUS);
3917
3918		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3919			/* delay required during transition out of D3hot */
3920			msleep(20);
3921
3922		val = REG_RD(bp, BNX2_EMAC_MODE);
3923		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3924		val &= ~BNX2_EMAC_MODE_MPKT;
3925		REG_WR(bp, BNX2_EMAC_MODE, val);
3926
3927		val = REG_RD(bp, BNX2_RPM_CONFIG);
3928		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3929		REG_WR(bp, BNX2_RPM_CONFIG, val);
3930		break;
3931	}
3932	case PCI_D3hot: {
3933		int i;
3934		u32 val, wol_msg;
3935
3936		if (bp->wol) {
3937			u32 advertising;
3938			u8 autoneg;
3939
3940			autoneg = bp->autoneg;
3941			advertising = bp->advertising;
3942
3943			if (bp->phy_port == PORT_TP) {
3944				bp->autoneg = AUTONEG_SPEED;
3945				bp->advertising = ADVERTISED_10baseT_Half |
3946					ADVERTISED_10baseT_Full |
3947					ADVERTISED_100baseT_Half |
3948					ADVERTISED_100baseT_Full |
3949					ADVERTISED_Autoneg;
3950			}
3951
3952			spin_lock_bh(&bp->phy_lock);
3953			bnx2_setup_phy(bp, bp->phy_port);
3954			spin_unlock_bh(&bp->phy_lock);
3955
3956			bp->autoneg = autoneg;
3957			bp->advertising = advertising;
3958
3959			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3960
3961			val = REG_RD(bp, BNX2_EMAC_MODE);
3962
3963			/* Enable port mode. */
3964			val &= ~BNX2_EMAC_MODE_PORT;
3965			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3966			       BNX2_EMAC_MODE_ACPI_RCVD |
3967			       BNX2_EMAC_MODE_MPKT;
3968			if (bp->phy_port == PORT_TP)
3969				val |= BNX2_EMAC_MODE_PORT_MII;
3970			else {
3971				val |= BNX2_EMAC_MODE_PORT_GMII;
3972				if (bp->line_speed == SPEED_2500)
3973					val |= BNX2_EMAC_MODE_25G_MODE;
3974			}
3975
3976			REG_WR(bp, BNX2_EMAC_MODE, val);
3977
3978			/* receive all multicast */
3979			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3980				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3981				       0xffffffff);
3982			}
3983			REG_WR(bp, BNX2_EMAC_RX_MODE,
3984			       BNX2_EMAC_RX_MODE_SORT_MODE);
3985
3986			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3987			      BNX2_RPM_SORT_USER0_MC_EN;
3988			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3989			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3990			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3991			       BNX2_RPM_SORT_USER0_ENA);
3992
3993			/* Need to enable EMAC and RPM for WOL. */
3994			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3995			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3996			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3997			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3998
3999			val = REG_RD(bp, BNX2_RPM_CONFIG);
4000			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4001			REG_WR(bp, BNX2_RPM_CONFIG, val);
4002
4003			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4004		}
4005		else {
4006			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4007		}
4008
4009		if (!(bp->flags & BNX2_FLAG_NO_WOL))
4010			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4011				     1, 0);
4012
4013		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4014		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4015		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4016
4017			if (bp->wol)
4018				pmcsr |= 3;
4019		}
4020		else {
4021			pmcsr |= 3;
4022		}
4023		if (bp->wol) {
4024			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4025		}
4026		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4027				      pmcsr);
4028
4029		/* No more memory access after this point until
4030		 * device is brought back to D0.
4031		 */
4032		udelay(50);
4033		break;
4034	}
4035	default:
4036		return -EINVAL;
4037	}
4038	return 0;
4039}
4040
4041static int
4042bnx2_acquire_nvram_lock(struct bnx2 *bp)
4043{
4044	u32 val;
4045	int j;
4046
4047	/* Request access to the flash interface. */
4048	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4049	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4050		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4051		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4052			break;
4053
4054		udelay(5);
4055	}
4056
4057	if (j >= NVRAM_TIMEOUT_COUNT)
4058		return -EBUSY;
4059
4060	return 0;
4061}
4062
4063static int
4064bnx2_release_nvram_lock(struct bnx2 *bp)
4065{
4066	int j;
4067	u32 val;
4068
4069	/* Relinquish nvram interface. */
4070	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4071
4072	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4073		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4074		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4075			break;
4076
4077		udelay(5);
4078	}
4079
4080	if (j >= NVRAM_TIMEOUT_COUNT)
4081		return -EBUSY;
4082
4083	return 0;
4084}
4085
4086
4087static int
4088bnx2_enable_nvram_write(struct bnx2 *bp)
4089{
4090	u32 val;
4091
4092	val = REG_RD(bp, BNX2_MISC_CFG);
4093	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4094
4095	if (bp->flash_info->flags & BNX2_NV_WREN) {
4096		int j;
4097
4098		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4099		REG_WR(bp, BNX2_NVM_COMMAND,
4100		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4101
4102		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4103			udelay(5);
4104
4105			val = REG_RD(bp, BNX2_NVM_COMMAND);
4106			if (val & BNX2_NVM_COMMAND_DONE)
4107				break;
4108		}
4109
4110		if (j >= NVRAM_TIMEOUT_COUNT)
4111			return -EBUSY;
4112	}
4113	return 0;
4114}
4115
4116static void
4117bnx2_disable_nvram_write(struct bnx2 *bp)
4118{
4119	u32 val;
4120
4121	val = REG_RD(bp, BNX2_MISC_CFG);
4122	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4123}
4124
4125
4126static void
4127bnx2_enable_nvram_access(struct bnx2 *bp)
4128{
4129	u32 val;
4130
4131	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4132	/* Enable both bits, even on read. */
4133	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4134	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4135}
4136
4137static void
4138bnx2_disable_nvram_access(struct bnx2 *bp)
4139{
4140	u32 val;
4141
4142	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4143	/* Disable both bits, even after read. */
4144	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4145		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4146			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4147}
4148
4149static int
4150bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4151{
4152	u32 cmd;
4153	int j;
4154
4155	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4156		/* Buffered flash, no erase needed */
4157		return 0;
4158
4159	/* Build an erase command */
4160	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4161	      BNX2_NVM_COMMAND_DOIT;
4162
4163	/* Need to clear DONE bit separately. */
4164	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4165
4166	/* Address of the NVRAM to read from. */
4167	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4168
4169	/* Issue an erase command. */
4170	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4171
4172	/* Wait for completion. */
4173	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4174		u32 val;
4175
4176		udelay(5);
4177
4178		val = REG_RD(bp, BNX2_NVM_COMMAND);
4179		if (val & BNX2_NVM_COMMAND_DONE)
4180			break;
4181	}
4182
4183	if (j >= NVRAM_TIMEOUT_COUNT)
4184		return -EBUSY;
4185
4186	return 0;
4187}
4188
4189static int
4190bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4191{
4192	u32 cmd;
4193	int j;
4194
4195	/* Build the command word. */
4196	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4197
4198	/* Calculate an offset of a buffered flash, not needed for 5709. */
4199	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4200		offset = ((offset / bp->flash_info->page_size) <<
4201			   bp->flash_info->page_bits) +
4202			  (offset % bp->flash_info->page_size);
4203	}
4204
4205	/* Need to clear DONE bit separately. */
4206	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4207
4208	/* Address of the NVRAM to read from. */
4209	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4210
4211	/* Issue a read command. */
4212	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4213
4214	/* Wait for completion. */
4215	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4216		u32 val;
4217
4218		udelay(5);
4219
4220		val = REG_RD(bp, BNX2_NVM_COMMAND);
4221		if (val & BNX2_NVM_COMMAND_DONE) {
4222			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4223			memcpy(ret_val, &v, 4);
4224			break;
4225		}
4226	}
4227	if (j >= NVRAM_TIMEOUT_COUNT)
4228		return -EBUSY;
4229
4230	return 0;
4231}
4232
4233
4234static int
4235bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4236{
4237	u32 cmd;
4238	__be32 val32;
4239	int j;
4240
4241	/* Build the command word. */
4242	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4243
4244	/* Calculate an offset of a buffered flash, not needed for 5709. */
4245	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4246		offset = ((offset / bp->flash_info->page_size) <<
4247			  bp->flash_info->page_bits) +
4248			 (offset % bp->flash_info->page_size);
4249	}
4250
4251	/* Need to clear DONE bit separately. */
4252	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4253
4254	memcpy(&val32, val, 4);
4255
4256	/* Write the data. */
4257	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4258
4259	/* Address of the NVRAM to write to. */
4260	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4261
4262	/* Issue the write command. */
4263	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4264
4265	/* Wait for completion. */
4266	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4267		udelay(5);
4268
4269		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4270			break;
4271	}
4272	if (j >= NVRAM_TIMEOUT_COUNT)
4273		return -EBUSY;
4274
4275	return 0;
4276}
4277
4278static int
4279bnx2_init_nvram(struct bnx2 *bp)
4280{
4281	u32 val;
4282	int j, entry_count, rc = 0;
4283	const struct flash_spec *flash;
4284
4285	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4286		bp->flash_info = &flash_5709;
4287		goto get_flash_size;
4288	}
4289
4290	/* Determine the selected interface. */
4291	val = REG_RD(bp, BNX2_NVM_CFG1);
4292
4293	entry_count = ARRAY_SIZE(flash_table);
4294
4295	if (val & 0x40000000) {
4296
4297		/* Flash interface has been reconfigured */
4298		for (j = 0, flash = &flash_table[0]; j < entry_count;
4299		     j++, flash++) {
4300			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4301			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4302				bp->flash_info = flash;
4303				break;
4304			}
4305		}
4306	}
4307	else {
4308		u32 mask;
4309		/* Not yet been reconfigured */
4310
4311		if (val & (1 << 23))
4312			mask = FLASH_BACKUP_STRAP_MASK;
4313		else
4314			mask = FLASH_STRAP_MASK;
4315
4316		for (j = 0, flash = &flash_table[0]; j < entry_count;
4317			j++, flash++) {
4318
4319			if ((val & mask) == (flash->strapping & mask)) {
4320				bp->flash_info = flash;
4321
4322				/* Request access to the flash interface. */
4323				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4324					return rc;
4325
4326				/* Enable access to flash interface */
4327				bnx2_enable_nvram_access(bp);
4328
4329				/* Reconfigure the flash interface */
4330				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4331				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4332				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4333				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4334
4335				/* Disable access to flash interface */
4336				bnx2_disable_nvram_access(bp);
4337				bnx2_release_nvram_lock(bp);
4338
4339				break;
4340			}
4341		}
4342	} /* if (val & 0x40000000) */
4343
4344	if (j == entry_count) {
4345		bp->flash_info = NULL;
4346		pr_alert("Unknown flash/EEPROM type\n");
4347		return -ENODEV;
4348	}
4349
4350get_flash_size:
4351	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4352	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4353	if (val)
4354		bp->flash_size = val;
4355	else
4356		bp->flash_size = bp->flash_info->total_size;
4357
4358	return rc;
4359}
4360
4361static int
4362bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4363		int buf_size)
4364{
4365	int rc = 0;
4366	u32 cmd_flags, offset32, len32, extra;
4367
4368	if (buf_size == 0)
4369		return 0;
4370
4371	/* Request access to the flash interface. */
4372	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4373		return rc;
4374
4375	/* Enable access to flash interface */
4376	bnx2_enable_nvram_access(bp);
4377
4378	len32 = buf_size;
4379	offset32 = offset;
4380	extra = 0;
4381
4382	cmd_flags = 0;
4383
4384	if (offset32 & 3) {
4385		u8 buf[4];
4386		u32 pre_len;
4387
4388		offset32 &= ~3;
4389		pre_len = 4 - (offset & 3);
4390
4391		if (pre_len >= len32) {
4392			pre_len = len32;
4393			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4394				    BNX2_NVM_COMMAND_LAST;
4395		}
4396		else {
4397			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4398		}
4399
4400		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4401
4402		if (rc)
4403			return rc;
4404
4405		memcpy(ret_buf, buf + (offset & 3), pre_len);
4406
4407		offset32 += 4;
4408		ret_buf += pre_len;
4409		len32 -= pre_len;
4410	}
4411	if (len32 & 3) {
4412		extra = 4 - (len32 & 3);
4413		len32 = (len32 + 4) & ~3;
4414	}
4415
4416	if (len32 == 4) {
4417		u8 buf[4];
4418
4419		if (cmd_flags)
4420			cmd_flags = BNX2_NVM_COMMAND_LAST;
4421		else
4422			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4423				    BNX2_NVM_COMMAND_LAST;
4424
4425		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4426
4427		memcpy(ret_buf, buf, 4 - extra);
4428	}
4429	else if (len32 > 0) {
4430		u8 buf[4];
4431
4432		/* Read the first word. */
4433		if (cmd_flags)
4434			cmd_flags = 0;
4435		else
4436			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4437
4438		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4439
4440		/* Advance to the next dword. */
4441		offset32 += 4;
4442		ret_buf += 4;
4443		len32 -= 4;
4444
4445		while (len32 > 4 && rc == 0) {
4446			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4447
4448			/* Advance to the next dword. */
4449			offset32 += 4;
4450			ret_buf += 4;
4451			len32 -= 4;
4452		}
4453
4454		if (rc)
4455			return rc;
4456
4457		cmd_flags = BNX2_NVM_COMMAND_LAST;
4458		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4459
4460		memcpy(ret_buf, buf, 4 - extra);
4461	}
4462
4463	/* Disable access to flash interface */
4464	bnx2_disable_nvram_access(bp);
4465
4466	bnx2_release_nvram_lock(bp);
4467
4468	return rc;
4469}
4470
4471static int
4472bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4473		int buf_size)
4474{
4475	u32 written, offset32, len32;
4476	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4477	int rc = 0;
4478	int align_start, align_end;
4479
4480	buf = data_buf;
4481	offset32 = offset;
4482	len32 = buf_size;
4483	align_start = align_end = 0;
4484
4485	if ((align_start = (offset32 & 3))) {
4486		offset32 &= ~3;
4487		len32 += align_start;
4488		if (len32 < 4)
4489			len32 = 4;
4490		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4491			return rc;
4492	}
4493
4494	if (len32 & 3) {
4495		align_end = 4 - (len32 & 3);
4496		len32 += align_end;
4497		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4498			return rc;
4499	}
4500
4501	if (align_start || align_end) {
4502		align_buf = kmalloc(len32, GFP_KERNEL);
4503		if (align_buf == NULL)
4504			return -ENOMEM;
4505		if (align_start) {
4506			memcpy(align_buf, start, 4);
4507		}
4508		if (align_end) {
4509			memcpy(align_buf + len32 - 4, end, 4);
4510		}
4511		memcpy(align_buf + align_start, data_buf, buf_size);
4512		buf = align_buf;
4513	}
4514
4515	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4516		flash_buffer = kmalloc(264, GFP_KERNEL);
4517		if (flash_buffer == NULL) {
4518			rc = -ENOMEM;
4519			goto nvram_write_end;
4520		}
4521	}
4522
4523	written = 0;
4524	while ((written < len32) && (rc == 0)) {
4525		u32 page_start, page_end, data_start, data_end;
4526		u32 addr, cmd_flags;
4527		int i;
4528
4529	        /* Find the page_start addr */
4530		page_start = offset32 + written;
4531		page_start -= (page_start % bp->flash_info->page_size);
4532		/* Find the page_end addr */
4533		page_end = page_start + bp->flash_info->page_size;
4534		/* Find the data_start addr */
4535		data_start = (written == 0) ? offset32 : page_start;
4536		/* Find the data_end addr */
4537		data_end = (page_end > offset32 + len32) ?
4538			(offset32 + len32) : page_end;
4539
4540		/* Request access to the flash interface. */
4541		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4542			goto nvram_write_end;
4543
4544		/* Enable access to flash interface */
4545		bnx2_enable_nvram_access(bp);
4546
4547		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4548		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4549			int j;
4550
4551			/* Read the whole page into the buffer
4552			 * (non-buffer flash only) */
4553			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4554				if (j == (bp->flash_info->page_size - 4)) {
4555					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4556				}
4557				rc = bnx2_nvram_read_dword(bp,
4558					page_start + j,
4559					&flash_buffer[j],
4560					cmd_flags);
4561
4562				if (rc)
4563					goto nvram_write_end;
4564
4565				cmd_flags = 0;
4566			}
4567		}
4568
4569		/* Enable writes to flash interface (unlock write-protect) */
4570		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4571			goto nvram_write_end;
4572
4573		/* Loop to write back the buffer data from page_start to
4574		 * data_start */
4575		i = 0;
4576		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4577			/* Erase the page */
4578			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4579				goto nvram_write_end;
4580
4581			/* Re-enable the write again for the actual write */
4582			bnx2_enable_nvram_write(bp);
4583
4584			for (addr = page_start; addr < data_start;
4585				addr += 4, i += 4) {
4586
4587				rc = bnx2_nvram_write_dword(bp, addr,
4588					&flash_buffer[i], cmd_flags);
4589
4590				if (rc != 0)
4591					goto nvram_write_end;
4592
4593				cmd_flags = 0;
4594			}
4595		}
4596
4597		/* Loop to write the new data from data_start to data_end */
4598		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4599			if ((addr == page_end - 4) ||
4600				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4601				 (addr == data_end - 4))) {
4602
4603				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4604			}
4605			rc = bnx2_nvram_write_dword(bp, addr, buf,
4606				cmd_flags);
4607
4608			if (rc != 0)
4609				goto nvram_write_end;
4610
4611			cmd_flags = 0;
4612			buf += 4;
4613		}
4614
4615		/* Loop to write back the buffer data from data_end
4616		 * to page_end */
4617		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4618			for (addr = data_end; addr < page_end;
4619				addr += 4, i += 4) {
4620
4621				if (addr == page_end-4) {
4622					cmd_flags = BNX2_NVM_COMMAND_LAST;
4623                		}
4624				rc = bnx2_nvram_write_dword(bp, addr,
4625					&flash_buffer[i], cmd_flags);
4626
4627				if (rc != 0)
4628					goto nvram_write_end;
4629
4630				cmd_flags = 0;
4631			}
4632		}
4633
4634		/* Disable writes to flash interface (lock write-protect) */
4635		bnx2_disable_nvram_write(bp);
4636
4637		/* Disable access to flash interface */
4638		bnx2_disable_nvram_access(bp);
4639		bnx2_release_nvram_lock(bp);
4640
4641		/* Increment written */
4642		written += data_end - data_start;
4643	}
4644
4645nvram_write_end:
4646	kfree(flash_buffer);
4647	kfree(align_buf);
4648	return rc;
4649}
4650
4651static void
4652bnx2_init_fw_cap(struct bnx2 *bp)
4653{
4654	u32 val, sig = 0;
4655
4656	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4657	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4658
4659	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4660		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4661
4662	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4663	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4664		return;
4665
4666	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4667		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4668		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4669	}
4670
4671	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4672	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4673		u32 link;
4674
4675		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4676
4677		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4678		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4679			bp->phy_port = PORT_FIBRE;
4680		else
4681			bp->phy_port = PORT_TP;
4682
4683		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4684		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4685	}
4686
4687	if (netif_running(bp->dev) && sig)
4688		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4689}
4690
4691static void
4692bnx2_setup_msix_tbl(struct bnx2 *bp)
4693{
4694	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4695
4696	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4697	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4698}
4699
4700static int
4701bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4702{
4703	u32 val;
4704	int i, rc = 0;
4705	u8 old_port;
4706
4707	/* Wait for the current PCI transaction to complete before
4708	 * issuing a reset. */
4709	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4710	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4711		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4712		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4713		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4714		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4715		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4716		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4717		udelay(5);
4718	} else {  /* 5709 */
4719		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4720		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4721		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4722		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4723
4724		for (i = 0; i < 100; i++) {
4725			msleep(1);
4726			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4727			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4728				break;
4729		}
4730	}
4731
4732	/* Wait for the firmware to tell us it is ok to issue a reset. */
4733	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4734
4735	/* Deposit a driver reset signature so the firmware knows that
4736	 * this is a soft reset. */
4737	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4738		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4739
4740	/* Do a dummy read to force the chip to complete all current transaction
4741	 * before we issue a reset. */
4742	val = REG_RD(bp, BNX2_MISC_ID);
4743
4744	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4745		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4746		REG_RD(bp, BNX2_MISC_COMMAND);
4747		udelay(5);
4748
4749		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4750		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4751
4752		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4753
4754	} else {
4755		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4756		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4757		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4758
4759		/* Chip reset. */
4760		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4761
4762		/* Reading back any register after chip reset will hang the
4763		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4764		 * of margin for write posting.
4765		 */
4766		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4767		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
4768			msleep(20);
4769
4770		/* Reset takes approximate 30 usec */
4771		for (i = 0; i < 10; i++) {
4772			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4773			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4774				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4775				break;
4776			udelay(10);
4777		}
4778
4779		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4780			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4781			pr_err("Chip reset did not complete\n");
4782			return -EBUSY;
4783		}
4784	}
4785
4786	/* Make sure byte swapping is properly configured. */
4787	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4788	if (val != 0x01020304) {
4789		pr_err("Chip not in correct endian mode\n");
4790		return -ENODEV;
4791	}
4792
4793	/* Wait for the firmware to finish its initialization. */
4794	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4795	if (rc)
4796		return rc;
4797
4798	spin_lock_bh(&bp->phy_lock);
4799	old_port = bp->phy_port;
4800	bnx2_init_fw_cap(bp);
4801	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4802	    old_port != bp->phy_port)
4803		bnx2_set_default_remote_link(bp);
4804	spin_unlock_bh(&bp->phy_lock);
4805
4806	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4807		/* Adjust the voltage regular to two steps lower.  The default
4808		 * of this register is 0x0000000e. */
4809		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4810
4811		/* Remove bad rbuf memory from the free pool. */
4812		rc = bnx2_alloc_bad_rbuf(bp);
4813	}
4814
4815	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4816		bnx2_setup_msix_tbl(bp);
4817		/* Prevent MSIX table reads and write from timing out */
4818		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4819			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4820	}
4821
4822	return rc;
4823}
4824
4825static int
4826bnx2_init_chip(struct bnx2 *bp)
4827{
4828	u32 val, mtu;
4829	int rc, i;
4830
4831	/* Make sure the interrupt is not active. */
4832	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4833
4834	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4835	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4836#ifdef __BIG_ENDIAN
4837	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4838#endif
4839	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4840	      DMA_READ_CHANS << 12 |
4841	      DMA_WRITE_CHANS << 16;
4842
4843	val |= (0x2 << 20) | (1 << 11);
4844
4845	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4846		val |= (1 << 23);
4847
4848	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4849	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4850		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4851
4852	REG_WR(bp, BNX2_DMA_CONFIG, val);
4853
4854	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4855		val = REG_RD(bp, BNX2_TDMA_CONFIG);
4856		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4857		REG_WR(bp, BNX2_TDMA_CONFIG, val);
4858	}
4859
4860	if (bp->flags & BNX2_FLAG_PCIX) {
4861		u16 val16;
4862
4863		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4864				     &val16);
4865		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4866				      val16 & ~PCI_X_CMD_ERO);
4867	}
4868
4869	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4870	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4871	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4872	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4873
4874	/* Initialize context mapping and zero out the quick contexts.  The
4875	 * context block must have already been enabled. */
4876	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4877		rc = bnx2_init_5709_context(bp);
4878		if (rc)
4879			return rc;
4880	} else
4881		bnx2_init_context(bp);
4882
4883	if ((rc = bnx2_init_cpus(bp)) != 0)
4884		return rc;
4885
4886	bnx2_init_nvram(bp);
4887
4888	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4889
4890	val = REG_RD(bp, BNX2_MQ_CONFIG);
4891	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4892	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4893	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4894		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4895		if (CHIP_REV(bp) == CHIP_REV_Ax)
4896			val |= BNX2_MQ_CONFIG_HALT_DIS;
4897	}
4898
4899	REG_WR(bp, BNX2_MQ_CONFIG, val);
4900
4901	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4902	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4903	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4904
4905	val = (BCM_PAGE_BITS - 8) << 24;
4906	REG_WR(bp, BNX2_RV2P_CONFIG, val);
4907
4908	/* Configure page size. */
4909	val = REG_RD(bp, BNX2_TBDR_CONFIG);
4910	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4911	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4912	REG_WR(bp, BNX2_TBDR_CONFIG, val);
4913
4914	val = bp->mac_addr[0] +
4915	      (bp->mac_addr[1] << 8) +
4916	      (bp->mac_addr[2] << 16) +
4917	      bp->mac_addr[3] +
4918	      (bp->mac_addr[4] << 8) +
4919	      (bp->mac_addr[5] << 16);
4920	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4921
4922	/* Program the MTU.  Also include 4 bytes for CRC32. */
4923	mtu = bp->dev->mtu;
4924	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4925	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4926		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4927	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4928
4929	if (mtu < 1500)
4930		mtu = 1500;
4931
4932	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4933	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4934	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4935
4936	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4937	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4938		bp->bnx2_napi[i].last_status_idx = 0;
4939
4940	bp->idle_chk_status_idx = 0xffff;
4941
4942	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4943
4944	/* Set up how to generate a link change interrupt. */
4945	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4946
4947	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4948	       (u64) bp->status_blk_mapping & 0xffffffff);
4949	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4950
4951	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4952	       (u64) bp->stats_blk_mapping & 0xffffffff);
4953	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4954	       (u64) bp->stats_blk_mapping >> 32);
4955
4956	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4957	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4958
4959	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4960	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4961
4962	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4963	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4964
4965	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4966
4967	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4968
4969	REG_WR(bp, BNX2_HC_COM_TICKS,
4970	       (bp->com_ticks_int << 16) | bp->com_ticks);
4971
4972	REG_WR(bp, BNX2_HC_CMD_TICKS,
4973	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4974
4975	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4976		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4977	else
4978		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4979	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4980
4981	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4982		val = BNX2_HC_CONFIG_COLLECT_STATS;
4983	else {
4984		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4985		      BNX2_HC_CONFIG_COLLECT_STATS;
4986	}
4987
4988	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4989		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4990		       BNX2_HC_MSIX_BIT_VECTOR_VAL);
4991
4992		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4993	}
4994
4995	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4996		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4997
4998	REG_WR(bp, BNX2_HC_CONFIG, val);
4999
5000	if (bp->rx_ticks < 25)
5001		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5002	else
5003		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5004
5005	for (i = 1; i < bp->irq_nvecs; i++) {
5006		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5007			   BNX2_HC_SB_CONFIG_1;
5008
5009		REG_WR(bp, base,
5010			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5011			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5012			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5013
5014		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5015			(bp->tx_quick_cons_trip_int << 16) |
5016			 bp->tx_quick_cons_trip);
5017
5018		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5019			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5020
5021		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5022		       (bp->rx_quick_cons_trip_int << 16) |
5023			bp->rx_quick_cons_trip);
5024
5025		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5026			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5027	}
5028
5029	/* Clear internal stats counters. */
5030	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5031
5032	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5033
5034	/* Initialize the receive filter. */
5035	bnx2_set_rx_mode(bp->dev);
5036
5037	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5038		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5039		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5040		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5041	}
5042	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5043			  1, 0);
5044
5045	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5046	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5047
5048	udelay(20);
5049
5050	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5051
5052	return rc;
5053}
5054
5055static void
5056bnx2_clear_ring_states(struct bnx2 *bp)
5057{
5058	struct bnx2_napi *bnapi;
5059	struct bnx2_tx_ring_info *txr;
5060	struct bnx2_rx_ring_info *rxr;
5061	int i;
5062
5063	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5064		bnapi = &bp->bnx2_napi[i];
5065		txr = &bnapi->tx_ring;
5066		rxr = &bnapi->rx_ring;
5067
5068		txr->tx_cons = 0;
5069		txr->hw_tx_cons = 0;
5070		rxr->rx_prod_bseq = 0;
5071		rxr->rx_prod = 0;
5072		rxr->rx_cons = 0;
5073		rxr->rx_pg_prod = 0;
5074		rxr->rx_pg_cons = 0;
5075	}
5076}
5077
5078static void
5079bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5080{
5081	u32 val, offset0, offset1, offset2, offset3;
5082	u32 cid_addr = GET_CID_ADDR(cid);
5083
5084	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5085		offset0 = BNX2_L2CTX_TYPE_XI;
5086		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5087		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5088		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5089	} else {
5090		offset0 = BNX2_L2CTX_TYPE;
5091		offset1 = BNX2_L2CTX_CMD_TYPE;
5092		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5093		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5094	}
5095	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5096	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5097
5098	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5099	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5100
5101	val = (u64) txr->tx_desc_mapping >> 32;
5102	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5103
5104	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5105	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5106}
5107
5108static void
5109bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5110{
5111	struct tx_bd *txbd;
5112	u32 cid = TX_CID;
5113	struct bnx2_napi *bnapi;
5114	struct bnx2_tx_ring_info *txr;
5115
5116	bnapi = &bp->bnx2_napi[ring_num];
5117	txr = &bnapi->tx_ring;
5118
5119	if (ring_num == 0)
5120		cid = TX_CID;
5121	else
5122		cid = TX_TSS_CID + ring_num - 1;
5123
5124	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5125
5126	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5127
5128	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5129	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5130
5131	txr->tx_prod = 0;
5132	txr->tx_prod_bseq = 0;
5133
5134	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5135	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5136
5137	bnx2_init_tx_context(bp, cid, txr);
5138}
5139
5140static void
5141bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5142		     int num_rings)
5143{
5144	int i;
5145	struct rx_bd *rxbd;
5146
5147	for (i = 0; i < num_rings; i++) {
5148		int j;
5149
5150		rxbd = &rx_ring[i][0];
5151		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5152			rxbd->rx_bd_len = buf_size;
5153			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5154		}
5155		if (i == (num_rings - 1))
5156			j = 0;
5157		else
5158			j = i + 1;
5159		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5160		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5161	}
5162}
5163
5164static void
5165bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5166{
5167	int i;
5168	u16 prod, ring_prod;
5169	u32 cid, rx_cid_addr, val;
5170	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5171	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5172
5173	if (ring_num == 0)
5174		cid = RX_CID;
5175	else
5176		cid = RX_RSS_CID + ring_num - 1;
5177
5178	rx_cid_addr = GET_CID_ADDR(cid);
5179
5180	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5181			     bp->rx_buf_use_size, bp->rx_max_ring);
5182
5183	bnx2_init_rx_context(bp, cid);
5184
5185	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5186		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5187		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5188	}
5189
5190	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5191	if (bp->rx_pg_ring_size) {
5192		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5193				     rxr->rx_pg_desc_mapping,
5194				     PAGE_SIZE, bp->rx_max_pg_ring);
5195		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5196		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5197		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5198		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5199
5200		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5201		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5202
5203		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5204		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5205
5206		if (CHIP_NUM(bp) == CHIP_NUM_5709)
5207			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5208	}
5209
5210	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5211	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5212
5213	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5214	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5215
5216	ring_prod = prod = rxr->rx_pg_prod;
5217	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5218		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5219			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5220				    ring_num, i, bp->rx_pg_ring_size);
5221			break;
5222		}
5223		prod = NEXT_RX_BD(prod);
5224		ring_prod = RX_PG_RING_IDX(prod);
5225	}
5226	rxr->rx_pg_prod = prod;
5227
5228	ring_prod = prod = rxr->rx_prod;
5229	for (i = 0; i < bp->rx_ring_size; i++) {
5230		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5231			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5232				    ring_num, i, bp->rx_ring_size);
5233			break;
5234		}
5235		prod = NEXT_RX_BD(prod);
5236		ring_prod = RX_RING_IDX(prod);
5237	}
5238	rxr->rx_prod = prod;
5239
5240	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5241	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5242	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5243
5244	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5245	REG_WR16(bp, rxr->rx_bidx_addr, prod);
5246
5247	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5248}
5249
5250static void
5251bnx2_init_all_rings(struct bnx2 *bp)
5252{
5253	int i;
5254	u32 val;
5255
5256	bnx2_clear_ring_states(bp);
5257
5258	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5259	for (i = 0; i < bp->num_tx_rings; i++)
5260		bnx2_init_tx_ring(bp, i);
5261
5262	if (bp->num_tx_rings > 1)
5263		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5264		       (TX_TSS_CID << 7));
5265
5266	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5267	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5268
5269	for (i = 0; i < bp->num_rx_rings; i++)
5270		bnx2_init_rx_ring(bp, i);
5271
5272	if (bp->num_rx_rings > 1) {
5273		u32 tbl_32 = 0;
5274
5275		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5276			int shift = (i % 8) << 2;
5277
5278			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5279			if ((i % 8) == 7) {
5280				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5281				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5282					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5283					BNX2_RLUP_RSS_COMMAND_WRITE |
5284					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5285				tbl_32 = 0;
5286			}
5287		}
5288
5289		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5290		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5291
5292		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5293
5294	}
5295}
5296
5297static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5298{
5299	u32 max, num_rings = 1;
5300
5301	while (ring_size > MAX_RX_DESC_CNT) {
5302		ring_size -= MAX_RX_DESC_CNT;
5303		num_rings++;
5304	}
5305	/* round to next power of 2 */
5306	max = max_size;
5307	while ((max & num_rings) == 0)
5308		max >>= 1;
5309
5310	if (num_rings != max)
5311		max <<= 1;
5312
5313	return max;
5314}
5315
5316static void
5317bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5318{
5319	u32 rx_size, rx_space, jumbo_size;
5320
5321	/* 8 for CRC and VLAN */
5322	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5323
5324	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5325		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5326
5327	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5328	bp->rx_pg_ring_size = 0;
5329	bp->rx_max_pg_ring = 0;
5330	bp->rx_max_pg_ring_idx = 0;
5331	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5332		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5333
5334		jumbo_size = size * pages;
5335		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5336			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5337
5338		bp->rx_pg_ring_size = jumbo_size;
5339		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5340							MAX_RX_PG_RINGS);
5341		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5342		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5343		bp->rx_copy_thresh = 0;
5344	}
5345
5346	bp->rx_buf_use_size = rx_size;
5347	/* hw alignment + build_skb() overhead*/
5348	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5349		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5350	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5351	bp->rx_ring_size = size;
5352	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5353	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5354}
5355
5356static void
5357bnx2_free_tx_skbs(struct bnx2 *bp)
5358{
5359	int i;
5360
5361	for (i = 0; i < bp->num_tx_rings; i++) {
5362		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5363		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5364		int j;
5365
5366		if (txr->tx_buf_ring == NULL)
5367			continue;
5368
5369		for (j = 0; j < TX_DESC_CNT; ) {
5370			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5371			struct sk_buff *skb = tx_buf->skb;
5372			int k, last;
5373
5374			if (skb == NULL) {
5375				j++;
5376				continue;
5377			}
5378
5379			dma_unmap_single(&bp->pdev->dev,
5380					 dma_unmap_addr(tx_buf, mapping),
5381					 skb_headlen(skb),
5382					 PCI_DMA_TODEVICE);
5383
5384			tx_buf->skb = NULL;
5385
5386			last = tx_buf->nr_frags;
5387			j++;
5388			for (k = 0; k < last; k++, j++) {
5389				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5390				dma_unmap_page(&bp->pdev->dev,
5391					dma_unmap_addr(tx_buf, mapping),
5392					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5393					PCI_DMA_TODEVICE);
5394			}
5395			dev_kfree_skb(skb);
5396		}
5397		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5398	}
5399}
5400
5401static void
5402bnx2_free_rx_skbs(struct bnx2 *bp)
5403{
5404	int i;
5405
5406	for (i = 0; i < bp->num_rx_rings; i++) {
5407		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5408		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5409		int j;
5410
5411		if (rxr->rx_buf_ring == NULL)
5412			return;
5413
5414		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5415			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5416			u8 *data = rx_buf->data;
5417
5418			if (data == NULL)
5419				continue;
5420
5421			dma_unmap_single(&bp->pdev->dev,
5422					 dma_unmap_addr(rx_buf, mapping),
5423					 bp->rx_buf_use_size,
5424					 PCI_DMA_FROMDEVICE);
5425
5426			rx_buf->data = NULL;
5427
5428			kfree(data);
5429		}
5430		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5431			bnx2_free_rx_page(bp, rxr, j);
5432	}
5433}
5434
5435static void
5436bnx2_free_skbs(struct bnx2 *bp)
5437{
5438	bnx2_free_tx_skbs(bp);
5439	bnx2_free_rx_skbs(bp);
5440}
5441
5442static int
5443bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5444{
5445	int rc;
5446
5447	rc = bnx2_reset_chip(bp, reset_code);
5448	bnx2_free_skbs(bp);
5449	if (rc)
5450		return rc;
5451
5452	if ((rc = bnx2_init_chip(bp)) != 0)
5453		return rc;
5454
5455	bnx2_init_all_rings(bp);
5456	return 0;
5457}
5458
5459static int
5460bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5461{
5462	int rc;
5463
5464	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5465		return rc;
5466
5467	spin_lock_bh(&bp->phy_lock);
5468	bnx2_init_phy(bp, reset_phy);
5469	bnx2_set_link(bp);
5470	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5471		bnx2_remote_phy_event(bp);
5472	spin_unlock_bh(&bp->phy_lock);
5473	return 0;
5474}
5475
5476static int
5477bnx2_shutdown_chip(struct bnx2 *bp)
5478{
5479	u32 reset_code;
5480
5481	if (bp->flags & BNX2_FLAG_NO_WOL)
5482		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5483	else if (bp->wol)
5484		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5485	else
5486		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5487
5488	return bnx2_reset_chip(bp, reset_code);
5489}
5490
5491static int
5492bnx2_test_registers(struct bnx2 *bp)
5493{
5494	int ret;
5495	int i, is_5709;
5496	static const struct {
5497		u16   offset;
5498		u16   flags;
5499#define BNX2_FL_NOT_5709	1
5500		u32   rw_mask;
5501		u32   ro_mask;
5502	} reg_tbl[] = {
5503		{ 0x006c, 0, 0x00000000, 0x0000003f },
5504		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5505		{ 0x0094, 0, 0x00000000, 0x00000000 },
5506
5507		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5508		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5509		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5510		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5511		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5512		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5513		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5514		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5515		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5516
5517		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5518		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5519		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5520		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5521		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5522		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5523
5524		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5525		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5526		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5527
5528		{ 0x1000, 0, 0x00000000, 0x00000001 },
5529		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5530
5531		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5532		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5533		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5534		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5535		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5536		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5537		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5538		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5539		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5540		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5541
5542		{ 0x1800, 0, 0x00000000, 0x00000001 },
5543		{ 0x1804, 0, 0x00000000, 0x00000003 },
5544
5545		{ 0x2800, 0, 0x00000000, 0x00000001 },
5546		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5547		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5548		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5549		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5550		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5551		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5552		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5553		{ 0x2840, 0, 0x00000000, 0xffffffff },
5554		{ 0x2844, 0, 0x00000000, 0xffffffff },
5555		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5556		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5557
5558		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5559		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5560
5561		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5562		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5563		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5564		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5565		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5566		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5567		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5568		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5569		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5570
5571		{ 0x5004, 0, 0x00000000, 0x0000007f },
5572		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5573
5574		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5575		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5576		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5577		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5578		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5579		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5580		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5581		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5582		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5583
5584		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5585		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5586		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5587		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5588		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5589		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5590		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5591		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5592		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5593		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5594		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5595		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5596		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5597		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5598		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5599		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5600		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5601		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5602		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5603		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5604		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5605		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5606		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5607
5608		{ 0xffff, 0, 0x00000000, 0x00000000 },
5609	};
5610
5611	ret = 0;
5612	is_5709 = 0;
5613	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5614		is_5709 = 1;
5615
5616	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5617		u32 offset, rw_mask, ro_mask, save_val, val;
5618		u16 flags = reg_tbl[i].flags;
5619
5620		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5621			continue;
5622
5623		offset = (u32) reg_tbl[i].offset;
5624		rw_mask = reg_tbl[i].rw_mask;
5625		ro_mask = reg_tbl[i].ro_mask;
5626
5627		save_val = readl(bp->regview + offset);
5628
5629		writel(0, bp->regview + offset);
5630
5631		val = readl(bp->regview + offset);
5632		if ((val & rw_mask) != 0) {
5633			goto reg_test_err;
5634		}
5635
5636		if ((val & ro_mask) != (save_val & ro_mask)) {
5637			goto reg_test_err;
5638		}
5639
5640		writel(0xffffffff, bp->regview + offset);
5641
5642		val = readl(bp->regview + offset);
5643		if ((val & rw_mask) != rw_mask) {
5644			goto reg_test_err;
5645		}
5646
5647		if ((val & ro_mask) != (save_val & ro_mask)) {
5648			goto reg_test_err;
5649		}
5650
5651		writel(save_val, bp->regview + offset);
5652		continue;
5653
5654reg_test_err:
5655		writel(save_val, bp->regview + offset);
5656		ret = -ENODEV;
5657		break;
5658	}
5659	return ret;
5660}
5661
5662static int
5663bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5664{
5665	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5666		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5667	int i;
5668
5669	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5670		u32 offset;
5671
5672		for (offset = 0; offset < size; offset += 4) {
5673
5674			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5675
5676			if (bnx2_reg_rd_ind(bp, start + offset) !=
5677				test_pattern[i]) {
5678				return -ENODEV;
5679			}
5680		}
5681	}
5682	return 0;
5683}
5684
5685static int
5686bnx2_test_memory(struct bnx2 *bp)
5687{
5688	int ret = 0;
5689	int i;
5690	static struct mem_entry {
5691		u32   offset;
5692		u32   len;
5693	} mem_tbl_5706[] = {
5694		{ 0x60000,  0x4000 },
5695		{ 0xa0000,  0x3000 },
5696		{ 0xe0000,  0x4000 },
5697		{ 0x120000, 0x4000 },
5698		{ 0x1a0000, 0x4000 },
5699		{ 0x160000, 0x4000 },
5700		{ 0xffffffff, 0    },
5701	},
5702	mem_tbl_5709[] = {
5703		{ 0x60000,  0x4000 },
5704		{ 0xa0000,  0x3000 },
5705		{ 0xe0000,  0x4000 },
5706		{ 0x120000, 0x4000 },
5707		{ 0x1a0000, 0x4000 },
5708		{ 0xffffffff, 0    },
5709	};
5710	struct mem_entry *mem_tbl;
5711
5712	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5713		mem_tbl = mem_tbl_5709;
5714	else
5715		mem_tbl = mem_tbl_5706;
5716
5717	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5718		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5719			mem_tbl[i].len)) != 0) {
5720			return ret;
5721		}
5722	}
5723
5724	return ret;
5725}
5726
5727#define BNX2_MAC_LOOPBACK	0
5728#define BNX2_PHY_LOOPBACK	1
5729
5730static int
5731bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5732{
5733	unsigned int pkt_size, num_pkts, i;
5734	struct sk_buff *skb;
5735	u8 *data;
5736	unsigned char *packet;
5737	u16 rx_start_idx, rx_idx;
5738	dma_addr_t map;
5739	struct tx_bd *txbd;
5740	struct sw_bd *rx_buf;
5741	struct l2_fhdr *rx_hdr;
5742	int ret = -ENODEV;
5743	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5744	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5745	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5746
5747	tx_napi = bnapi;
5748
5749	txr = &tx_napi->tx_ring;
5750	rxr = &bnapi->rx_ring;
5751	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5752		bp->loopback = MAC_LOOPBACK;
5753		bnx2_set_mac_loopback(bp);
5754	}
5755	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5756		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5757			return 0;
5758
5759		bp->loopback = PHY_LOOPBACK;
5760		bnx2_set_phy_loopback(bp);
5761	}
5762	else
5763		return -EINVAL;
5764
5765	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5766	skb = netdev_alloc_skb(bp->dev, pkt_size);
5767	if (!skb)
5768		return -ENOMEM;
5769	packet = skb_put(skb, pkt_size);
5770	memcpy(packet, bp->dev->dev_addr, 6);
5771	memset(packet + 6, 0x0, 8);
5772	for (i = 14; i < pkt_size; i++)
5773		packet[i] = (unsigned char) (i & 0xff);
5774
5775	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5776			     PCI_DMA_TODEVICE);
5777	if (dma_mapping_error(&bp->pdev->dev, map)) {
5778		dev_kfree_skb(skb);
5779		return -EIO;
5780	}
5781
5782	REG_WR(bp, BNX2_HC_COMMAND,
5783	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5784
5785	REG_RD(bp, BNX2_HC_COMMAND);
5786
5787	udelay(5);
5788	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5789
5790	num_pkts = 0;
5791
5792	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5793
5794	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5795	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5796	txbd->tx_bd_mss_nbytes = pkt_size;
5797	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5798
5799	num_pkts++;
5800	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5801	txr->tx_prod_bseq += pkt_size;
5802
5803	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5804	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5805
5806	udelay(100);
5807
5808	REG_WR(bp, BNX2_HC_COMMAND,
5809	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5810
5811	REG_RD(bp, BNX2_HC_COMMAND);
5812
5813	udelay(5);
5814
5815	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5816	dev_kfree_skb(skb);
5817
5818	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5819		goto loopback_test_done;
5820
5821	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5822	if (rx_idx != rx_start_idx + num_pkts) {
5823		goto loopback_test_done;
5824	}
5825
5826	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5827	data = rx_buf->data;
5828
5829	rx_hdr = get_l2_fhdr(data);
5830	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5831
5832	dma_sync_single_for_cpu(&bp->pdev->dev,
5833		dma_unmap_addr(rx_buf, mapping),
5834		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5835
5836	if (rx_hdr->l2_fhdr_status &
5837		(L2_FHDR_ERRORS_BAD_CRC |
5838		L2_FHDR_ERRORS_PHY_DECODE |
5839		L2_FHDR_ERRORS_ALIGNMENT |
5840		L2_FHDR_ERRORS_TOO_SHORT |
5841		L2_FHDR_ERRORS_GIANT_FRAME)) {
5842
5843		goto loopback_test_done;
5844	}
5845
5846	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5847		goto loopback_test_done;
5848	}
5849
5850	for (i = 14; i < pkt_size; i++) {
5851		if (*(data + i) != (unsigned char) (i & 0xff)) {
5852			goto loopback_test_done;
5853		}
5854	}
5855
5856	ret = 0;
5857
5858loopback_test_done:
5859	bp->loopback = 0;
5860	return ret;
5861}
5862
5863#define BNX2_MAC_LOOPBACK_FAILED	1
5864#define BNX2_PHY_LOOPBACK_FAILED	2
5865#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5866					 BNX2_PHY_LOOPBACK_FAILED)
5867
5868static int
5869bnx2_test_loopback(struct bnx2 *bp)
5870{
5871	int rc = 0;
5872
5873	if (!netif_running(bp->dev))
5874		return BNX2_LOOPBACK_FAILED;
5875
5876	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5877	spin_lock_bh(&bp->phy_lock);
5878	bnx2_init_phy(bp, 1);
5879	spin_unlock_bh(&bp->phy_lock);
5880	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5881		rc |= BNX2_MAC_LOOPBACK_FAILED;
5882	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5883		rc |= BNX2_PHY_LOOPBACK_FAILED;
5884	return rc;
5885}
5886
5887#define NVRAM_SIZE 0x200
5888#define CRC32_RESIDUAL 0xdebb20e3
5889
5890static int
5891bnx2_test_nvram(struct bnx2 *bp)
5892{
5893	__be32 buf[NVRAM_SIZE / 4];
5894	u8 *data = (u8 *) buf;
5895	int rc = 0;
5896	u32 magic, csum;
5897
5898	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5899		goto test_nvram_done;
5900
5901        magic = be32_to_cpu(buf[0]);
5902	if (magic != 0x669955aa) {
5903		rc = -ENODEV;
5904		goto test_nvram_done;
5905	}
5906
5907	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5908		goto test_nvram_done;
5909
5910	csum = ether_crc_le(0x100, data);
5911	if (csum != CRC32_RESIDUAL) {
5912		rc = -ENODEV;
5913		goto test_nvram_done;
5914	}
5915
5916	csum = ether_crc_le(0x100, data + 0x100);
5917	if (csum != CRC32_RESIDUAL) {
5918		rc = -ENODEV;
5919	}
5920
5921test_nvram_done:
5922	return rc;
5923}
5924
5925static int
5926bnx2_test_link(struct bnx2 *bp)
5927{
5928	u32 bmsr;
5929
5930	if (!netif_running(bp->dev))
5931		return -ENODEV;
5932
5933	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5934		if (bp->link_up)
5935			return 0;
5936		return -ENODEV;
5937	}
5938	spin_lock_bh(&bp->phy_lock);
5939	bnx2_enable_bmsr1(bp);
5940	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5941	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5942	bnx2_disable_bmsr1(bp);
5943	spin_unlock_bh(&bp->phy_lock);
5944
5945	if (bmsr & BMSR_LSTATUS) {
5946		return 0;
5947	}
5948	return -ENODEV;
5949}
5950
5951static int
5952bnx2_test_intr(struct bnx2 *bp)
5953{
5954	int i;
5955	u16 status_idx;
5956
5957	if (!netif_running(bp->dev))
5958		return -ENODEV;
5959
5960	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5961
5962	/* This register is not touched during run-time. */
5963	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5964	REG_RD(bp, BNX2_HC_COMMAND);
5965
5966	for (i = 0; i < 10; i++) {
5967		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5968			status_idx) {
5969
5970			break;
5971		}
5972
5973		msleep_interruptible(10);
5974	}
5975	if (i < 10)
5976		return 0;
5977
5978	return -ENODEV;
5979}
5980
5981/* Determining link for parallel detection. */
5982static int
5983bnx2_5706_serdes_has_link(struct bnx2 *bp)
5984{
5985	u32 mode_ctl, an_dbg, exp;
5986
5987	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5988		return 0;
5989
5990	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5991	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5992
5993	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5994		return 0;
5995
5996	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5997	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5998	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5999
6000	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6001		return 0;
6002
6003	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6004	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6005	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6006
6007	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6008		return 0;
6009
6010	return 1;
6011}
6012
6013static void
6014bnx2_5706_serdes_timer(struct bnx2 *bp)
6015{
6016	int check_link = 1;
6017
6018	spin_lock(&bp->phy_lock);
6019	if (bp->serdes_an_pending) {
6020		bp->serdes_an_pending--;
6021		check_link = 0;
6022	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6023		u32 bmcr;
6024
6025		bp->current_interval = BNX2_TIMER_INTERVAL;
6026
6027		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6028
6029		if (bmcr & BMCR_ANENABLE) {
6030			if (bnx2_5706_serdes_has_link(bp)) {
6031				bmcr &= ~BMCR_ANENABLE;
6032				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6033				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6034				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6035			}
6036		}
6037	}
6038	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6039		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6040		u32 phy2;
6041
6042		bnx2_write_phy(bp, 0x17, 0x0f01);
6043		bnx2_read_phy(bp, 0x15, &phy2);
6044		if (phy2 & 0x20) {
6045			u32 bmcr;
6046
6047			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6048			bmcr |= BMCR_ANENABLE;
6049			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6050
6051			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6052		}
6053	} else
6054		bp->current_interval = BNX2_TIMER_INTERVAL;
6055
6056	if (check_link) {
6057		u32 val;
6058
6059		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6060		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6061		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6062
6063		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6064			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6065				bnx2_5706s_force_link_dn(bp, 1);
6066				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6067			} else
6068				bnx2_set_link(bp);
6069		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6070			bnx2_set_link(bp);
6071	}
6072	spin_unlock(&bp->phy_lock);
6073}
6074
6075static void
6076bnx2_5708_serdes_timer(struct bnx2 *bp)
6077{
6078	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6079		return;
6080
6081	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6082		bp->serdes_an_pending = 0;
6083		return;
6084	}
6085
6086	spin_lock(&bp->phy_lock);
6087	if (bp->serdes_an_pending)
6088		bp->serdes_an_pending--;
6089	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6090		u32 bmcr;
6091
6092		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6093		if (bmcr & BMCR_ANENABLE) {
6094			bnx2_enable_forced_2g5(bp);
6095			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6096		} else {
6097			bnx2_disable_forced_2g5(bp);
6098			bp->serdes_an_pending = 2;
6099			bp->current_interval = BNX2_TIMER_INTERVAL;
6100		}
6101
6102	} else
6103		bp->current_interval = BNX2_TIMER_INTERVAL;
6104
6105	spin_unlock(&bp->phy_lock);
6106}
6107
6108static void
6109bnx2_timer(unsigned long data)
6110{
6111	struct bnx2 *bp = (struct bnx2 *) data;
6112
6113	if (!netif_running(bp->dev))
6114		return;
6115
6116	if (atomic_read(&bp->intr_sem) != 0)
6117		goto bnx2_restart_timer;
6118
6119	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6120	     BNX2_FLAG_USING_MSI)
6121		bnx2_chk_missed_msi(bp);
6122
6123	bnx2_send_heart_beat(bp);
6124
6125	bp->stats_blk->stat_FwRxDrop =
6126		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6127
6128	/* workaround occasional corrupted counters */
6129	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6130		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6131					    BNX2_HC_COMMAND_STATS_NOW);
6132
6133	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6134		if (CHIP_NUM(bp) == CHIP_NUM_5706)
6135			bnx2_5706_serdes_timer(bp);
6136		else
6137			bnx2_5708_serdes_timer(bp);
6138	}
6139
6140bnx2_restart_timer:
6141	mod_timer(&bp->timer, jiffies + bp->current_interval);
6142}
6143
6144static int
6145bnx2_request_irq(struct bnx2 *bp)
6146{
6147	unsigned long flags;
6148	struct bnx2_irq *irq;
6149	int rc = 0, i;
6150
6151	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6152		flags = 0;
6153	else
6154		flags = IRQF_SHARED;
6155
6156	for (i = 0; i < bp->irq_nvecs; i++) {
6157		irq = &bp->irq_tbl[i];
6158		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6159				 &bp->bnx2_napi[i]);
6160		if (rc)
6161			break;
6162		irq->requested = 1;
6163	}
6164	return rc;
6165}
6166
6167static void
6168__bnx2_free_irq(struct bnx2 *bp)
6169{
6170	struct bnx2_irq *irq;
6171	int i;
6172
6173	for (i = 0; i < bp->irq_nvecs; i++) {
6174		irq = &bp->irq_tbl[i];
6175		if (irq->requested)
6176			free_irq(irq->vector, &bp->bnx2_napi[i]);
6177		irq->requested = 0;
6178	}
6179}
6180
6181static void
6182bnx2_free_irq(struct bnx2 *bp)
6183{
6184
6185	__bnx2_free_irq(bp);
6186	if (bp->flags & BNX2_FLAG_USING_MSI)
6187		pci_disable_msi(bp->pdev);
6188	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6189		pci_disable_msix(bp->pdev);
6190
6191	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6192}
6193
6194static void
6195bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6196{
6197	int i, total_vecs, rc;
6198	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6199	struct net_device *dev = bp->dev;
6200	const int len = sizeof(bp->irq_tbl[0].name);
6201
6202	bnx2_setup_msix_tbl(bp);
6203	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6204	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6205	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6206
6207	/*  Need to flush the previous three writes to ensure MSI-X
6208	 *  is setup properly */
6209	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6210
6211	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6212		msix_ent[i].entry = i;
6213		msix_ent[i].vector = 0;
6214	}
6215
6216	total_vecs = msix_vecs;
6217#ifdef BCM_CNIC
6218	total_vecs++;
6219#endif
6220	rc = -ENOSPC;
6221	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6222		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6223		if (rc <= 0)
6224			break;
6225		if (rc > 0)
6226			total_vecs = rc;
6227	}
6228
6229	if (rc != 0)
6230		return;
6231
6232	msix_vecs = total_vecs;
6233#ifdef BCM_CNIC
6234	msix_vecs--;
6235#endif
6236	bp->irq_nvecs = msix_vecs;
6237	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6238	for (i = 0; i < total_vecs; i++) {
6239		bp->irq_tbl[i].vector = msix_ent[i].vector;
6240		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6241		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6242	}
6243}
6244
6245static int
6246bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6247{
6248	int cpus = num_online_cpus();
6249	int msix_vecs;
6250
6251	if (!bp->num_req_rx_rings)
6252		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6253	else if (!bp->num_req_tx_rings)
6254		msix_vecs = max(cpus, bp->num_req_rx_rings);
6255	else
6256		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6257
6258	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6259
6260	bp->irq_tbl[0].handler = bnx2_interrupt;
6261	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6262	bp->irq_nvecs = 1;
6263	bp->irq_tbl[0].vector = bp->pdev->irq;
6264
6265	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6266		bnx2_enable_msix(bp, msix_vecs);
6267
6268	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6269	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6270		if (pci_enable_msi(bp->pdev) == 0) {
6271			bp->flags |= BNX2_FLAG_USING_MSI;
6272			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6273				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6274				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6275			} else
6276				bp->irq_tbl[0].handler = bnx2_msi;
6277
6278			bp->irq_tbl[0].vector = bp->pdev->irq;
6279		}
6280	}
6281
6282	if (!bp->num_req_tx_rings)
6283		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6284	else
6285		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6286
6287	if (!bp->num_req_rx_rings)
6288		bp->num_rx_rings = bp->irq_nvecs;
6289	else
6290		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6291
6292	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6293
6294	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6295}
6296
6297/* Called with rtnl_lock */
6298static int
6299bnx2_open(struct net_device *dev)
6300{
6301	struct bnx2 *bp = netdev_priv(dev);
6302	int rc;
6303
6304	rc = bnx2_request_firmware(bp);
6305	if (rc < 0)
6306		goto out;
6307
6308	netif_carrier_off(dev);
6309
6310	bnx2_set_power_state(bp, PCI_D0);
6311	bnx2_disable_int(bp);
6312
6313	rc = bnx2_setup_int_mode(bp, disable_msi);
6314	if (rc)
6315		goto open_err;
6316	bnx2_init_napi(bp);
6317	bnx2_napi_enable(bp);
6318	rc = bnx2_alloc_mem(bp);
6319	if (rc)
6320		goto open_err;
6321
6322	rc = bnx2_request_irq(bp);
6323	if (rc)
6324		goto open_err;
6325
6326	rc = bnx2_init_nic(bp, 1);
6327	if (rc)
6328		goto open_err;
6329
6330	mod_timer(&bp->timer, jiffies + bp->current_interval);
6331
6332	atomic_set(&bp->intr_sem, 0);
6333
6334	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6335
6336	bnx2_enable_int(bp);
6337
6338	if (bp->flags & BNX2_FLAG_USING_MSI) {
6339		/* Test MSI to make sure it is working
6340		 * If MSI test fails, go back to INTx mode
6341		 */
6342		if (bnx2_test_intr(bp) != 0) {
6343			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6344
6345			bnx2_disable_int(bp);
6346			bnx2_free_irq(bp);
6347
6348			bnx2_setup_int_mode(bp, 1);
6349
6350			rc = bnx2_init_nic(bp, 0);
6351
6352			if (!rc)
6353				rc = bnx2_request_irq(bp);
6354
6355			if (rc) {
6356				del_timer_sync(&bp->timer);
6357				goto open_err;
6358			}
6359			bnx2_enable_int(bp);
6360		}
6361	}
6362	if (bp->flags & BNX2_FLAG_USING_MSI)
6363		netdev_info(dev, "using MSI\n");
6364	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6365		netdev_info(dev, "using MSIX\n");
6366
6367	netif_tx_start_all_queues(dev);
6368out:
6369	return rc;
6370
6371open_err:
6372	bnx2_napi_disable(bp);
6373	bnx2_free_skbs(bp);
6374	bnx2_free_irq(bp);
6375	bnx2_free_mem(bp);
6376	bnx2_del_napi(bp);
6377	bnx2_release_firmware(bp);
6378	goto out;
6379}
6380
6381static void
6382bnx2_reset_task(struct work_struct *work)
6383{
6384	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6385	int rc;
6386
6387	rtnl_lock();
6388	if (!netif_running(bp->dev)) {
6389		rtnl_unlock();
6390		return;
6391	}
6392
6393	bnx2_netif_stop(bp, true);
6394
6395	rc = bnx2_init_nic(bp, 1);
6396	if (rc) {
6397		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6398		bnx2_napi_enable(bp);
6399		dev_close(bp->dev);
6400		rtnl_unlock();
6401		return;
6402	}
6403
6404	atomic_set(&bp->intr_sem, 1);
6405	bnx2_netif_start(bp, true);
6406	rtnl_unlock();
6407}
6408
6409static void
6410bnx2_dump_state(struct bnx2 *bp)
6411{
6412	struct net_device *dev = bp->dev;
6413	u32 val1, val2;
6414
6415	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6416	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6417		   atomic_read(&bp->intr_sem), val1);
6418	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6419	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6420	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6421	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6422		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
6423		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
6424	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6425		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6426	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6427		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6428	if (bp->flags & BNX2_FLAG_USING_MSIX)
6429		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6430			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6431}
6432
6433static void
6434bnx2_tx_timeout(struct net_device *dev)
6435{
6436	struct bnx2 *bp = netdev_priv(dev);
6437
6438	bnx2_dump_state(bp);
6439	bnx2_dump_mcp_state(bp);
6440
6441	/* This allows the netif to be shutdown gracefully before resetting */
6442	schedule_work(&bp->reset_task);
6443}
6444
6445/* Called with netif_tx_lock.
6446 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6447 * netif_wake_queue().
6448 */
6449static netdev_tx_t
6450bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6451{
6452	struct bnx2 *bp = netdev_priv(dev);
6453	dma_addr_t mapping;
6454	struct tx_bd *txbd;
6455	struct sw_tx_bd *tx_buf;
6456	u32 len, vlan_tag_flags, last_frag, mss;
6457	u16 prod, ring_prod;
6458	int i;
6459	struct bnx2_napi *bnapi;
6460	struct bnx2_tx_ring_info *txr;
6461	struct netdev_queue *txq;
6462
6463	/*  Determine which tx ring we will be placed on */
6464	i = skb_get_queue_mapping(skb);
6465	bnapi = &bp->bnx2_napi[i];
6466	txr = &bnapi->tx_ring;
6467	txq = netdev_get_tx_queue(dev, i);
6468
6469	if (unlikely(bnx2_tx_avail(bp, txr) <
6470	    (skb_shinfo(skb)->nr_frags + 1))) {
6471		netif_tx_stop_queue(txq);
6472		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6473
6474		return NETDEV_TX_BUSY;
6475	}
6476	len = skb_headlen(skb);
6477	prod = txr->tx_prod;
6478	ring_prod = TX_RING_IDX(prod);
6479
6480	vlan_tag_flags = 0;
6481	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6482		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6483	}
6484
6485	if (vlan_tx_tag_present(skb)) {
6486		vlan_tag_flags |=
6487			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6488	}
6489
6490	if ((mss = skb_shinfo(skb)->gso_size)) {
6491		u32 tcp_opt_len;
6492		struct iphdr *iph;
6493
6494		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6495
6496		tcp_opt_len = tcp_optlen(skb);
6497
6498		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6499			u32 tcp_off = skb_transport_offset(skb) -
6500				      sizeof(struct ipv6hdr) - ETH_HLEN;
6501
6502			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6503					  TX_BD_FLAGS_SW_FLAGS;
6504			if (likely(tcp_off == 0))
6505				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6506			else {
6507				tcp_off >>= 3;
6508				vlan_tag_flags |= ((tcp_off & 0x3) <<
6509						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6510						  ((tcp_off & 0x10) <<
6511						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6512				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6513			}
6514		} else {
6515			iph = ip_hdr(skb);
6516			if (tcp_opt_len || (iph->ihl > 5)) {
6517				vlan_tag_flags |= ((iph->ihl - 5) +
6518						   (tcp_opt_len >> 2)) << 8;
6519			}
6520		}
6521	} else
6522		mss = 0;
6523
6524	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6525	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6526		dev_kfree_skb(skb);
6527		return NETDEV_TX_OK;
6528	}
6529
6530	tx_buf = &txr->tx_buf_ring[ring_prod];
6531	tx_buf->skb = skb;
6532	dma_unmap_addr_set(tx_buf, mapping, mapping);
6533
6534	txbd = &txr->tx_desc_ring[ring_prod];
6535
6536	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6537	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6538	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6539	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6540
6541	last_frag = skb_shinfo(skb)->nr_frags;
6542	tx_buf->nr_frags = last_frag;
6543	tx_buf->is_gso = skb_is_gso(skb);
6544
6545	for (i = 0; i < last_frag; i++) {
6546		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6547
6548		prod = NEXT_TX_BD(prod);
6549		ring_prod = TX_RING_IDX(prod);
6550		txbd = &txr->tx_desc_ring[ring_prod];
6551
6552		len = skb_frag_size(frag);
6553		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6554					   DMA_TO_DEVICE);
6555		if (dma_mapping_error(&bp->pdev->dev, mapping))
6556			goto dma_error;
6557		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6558				   mapping);
6559
6560		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6561		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6562		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6563		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6564
6565	}
6566	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6567
6568	/* Sync BD data before updating TX mailbox */
6569	wmb();
6570
6571	netdev_tx_sent_queue(txq, skb->len);
6572
6573	prod = NEXT_TX_BD(prod);
6574	txr->tx_prod_bseq += skb->len;
6575
6576	REG_WR16(bp, txr->tx_bidx_addr, prod);
6577	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6578
6579	mmiowb();
6580
6581	txr->tx_prod = prod;
6582
6583	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6584		netif_tx_stop_queue(txq);
6585
6586		/* netif_tx_stop_queue() must be done before checking
6587		 * tx index in bnx2_tx_avail() below, because in
6588		 * bnx2_tx_int(), we update tx index before checking for
6589		 * netif_tx_queue_stopped().
6590		 */
6591		smp_mb();
6592		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6593			netif_tx_wake_queue(txq);
6594	}
6595
6596	return NETDEV_TX_OK;
6597dma_error:
6598	/* save value of frag that failed */
6599	last_frag = i;
6600
6601	/* start back at beginning and unmap skb */
6602	prod = txr->tx_prod;
6603	ring_prod = TX_RING_IDX(prod);
6604	tx_buf = &txr->tx_buf_ring[ring_prod];
6605	tx_buf->skb = NULL;
6606	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6607			 skb_headlen(skb), PCI_DMA_TODEVICE);
6608
6609	/* unmap remaining mapped pages */
6610	for (i = 0; i < last_frag; i++) {
6611		prod = NEXT_TX_BD(prod);
6612		ring_prod = TX_RING_IDX(prod);
6613		tx_buf = &txr->tx_buf_ring[ring_prod];
6614		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6615			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6616			       PCI_DMA_TODEVICE);
6617	}
6618
6619	dev_kfree_skb(skb);
6620	return NETDEV_TX_OK;
6621}
6622
6623/* Called with rtnl_lock */
6624static int
6625bnx2_close(struct net_device *dev)
6626{
6627	struct bnx2 *bp = netdev_priv(dev);
6628
6629	bnx2_disable_int_sync(bp);
6630	bnx2_napi_disable(bp);
6631	del_timer_sync(&bp->timer);
6632	bnx2_shutdown_chip(bp);
6633	bnx2_free_irq(bp);
6634	bnx2_free_skbs(bp);
6635	bnx2_free_mem(bp);
6636	bnx2_del_napi(bp);
6637	bp->link_up = 0;
6638	netif_carrier_off(bp->dev);
6639	bnx2_set_power_state(bp, PCI_D3hot);
6640	return 0;
6641}
6642
6643static void
6644bnx2_save_stats(struct bnx2 *bp)
6645{
6646	u32 *hw_stats = (u32 *) bp->stats_blk;
6647	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6648	int i;
6649
6650	/* The 1st 10 counters are 64-bit counters */
6651	for (i = 0; i < 20; i += 2) {
6652		u32 hi;
6653		u64 lo;
6654
6655		hi = temp_stats[i] + hw_stats[i];
6656		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6657		if (lo > 0xffffffff)
6658			hi++;
6659		temp_stats[i] = hi;
6660		temp_stats[i + 1] = lo & 0xffffffff;
6661	}
6662
6663	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6664		temp_stats[i] += hw_stats[i];
6665}
6666
6667#define GET_64BIT_NET_STATS64(ctr)		\
6668	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6669
6670#define GET_64BIT_NET_STATS(ctr)				\
6671	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6672	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6673
6674#define GET_32BIT_NET_STATS(ctr)				\
6675	(unsigned long) (bp->stats_blk->ctr +			\
6676			 bp->temp_stats_blk->ctr)
6677
6678static struct rtnl_link_stats64 *
6679bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6680{
6681	struct bnx2 *bp = netdev_priv(dev);
6682
6683	if (bp->stats_blk == NULL)
6684		return net_stats;
6685
6686	net_stats->rx_packets =
6687		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6688		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6689		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6690
6691	net_stats->tx_packets =
6692		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6693		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6694		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6695
6696	net_stats->rx_bytes =
6697		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6698
6699	net_stats->tx_bytes =
6700		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6701
6702	net_stats->multicast =
6703		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6704
6705	net_stats->collisions =
6706		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6707
6708	net_stats->rx_length_errors =
6709		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6710		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6711
6712	net_stats->rx_over_errors =
6713		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6714		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6715
6716	net_stats->rx_frame_errors =
6717		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6718
6719	net_stats->rx_crc_errors =
6720		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6721
6722	net_stats->rx_errors = net_stats->rx_length_errors +
6723		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6724		net_stats->rx_crc_errors;
6725
6726	net_stats->tx_aborted_errors =
6727		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6728		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6729
6730	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6731	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
6732		net_stats->tx_carrier_errors = 0;
6733	else {
6734		net_stats->tx_carrier_errors =
6735			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6736	}
6737
6738	net_stats->tx_errors =
6739		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6740		net_stats->tx_aborted_errors +
6741		net_stats->tx_carrier_errors;
6742
6743	net_stats->rx_missed_errors =
6744		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6745		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6746		GET_32BIT_NET_STATS(stat_FwRxDrop);
6747
6748	return net_stats;
6749}
6750
6751/* All ethtool functions called with rtnl_lock */
6752
6753static int
6754bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6755{
6756	struct bnx2 *bp = netdev_priv(dev);
6757	int support_serdes = 0, support_copper = 0;
6758
6759	cmd->supported = SUPPORTED_Autoneg;
6760	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6761		support_serdes = 1;
6762		support_copper = 1;
6763	} else if (bp->phy_port == PORT_FIBRE)
6764		support_serdes = 1;
6765	else
6766		support_copper = 1;
6767
6768	if (support_serdes) {
6769		cmd->supported |= SUPPORTED_1000baseT_Full |
6770			SUPPORTED_FIBRE;
6771		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6772			cmd->supported |= SUPPORTED_2500baseX_Full;
6773
6774	}
6775	if (support_copper) {
6776		cmd->supported |= SUPPORTED_10baseT_Half |
6777			SUPPORTED_10baseT_Full |
6778			SUPPORTED_100baseT_Half |
6779			SUPPORTED_100baseT_Full |
6780			SUPPORTED_1000baseT_Full |
6781			SUPPORTED_TP;
6782
6783	}
6784
6785	spin_lock_bh(&bp->phy_lock);
6786	cmd->port = bp->phy_port;
6787	cmd->advertising = bp->advertising;
6788
6789	if (bp->autoneg & AUTONEG_SPEED) {
6790		cmd->autoneg = AUTONEG_ENABLE;
6791	} else {
6792		cmd->autoneg = AUTONEG_DISABLE;
6793	}
6794
6795	if (netif_carrier_ok(dev)) {
6796		ethtool_cmd_speed_set(cmd, bp->line_speed);
6797		cmd->duplex = bp->duplex;
6798	}
6799	else {
6800		ethtool_cmd_speed_set(cmd, -1);
6801		cmd->duplex = -1;
6802	}
6803	spin_unlock_bh(&bp->phy_lock);
6804
6805	cmd->transceiver = XCVR_INTERNAL;
6806	cmd->phy_address = bp->phy_addr;
6807
6808	return 0;
6809}
6810
6811static int
6812bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6813{
6814	struct bnx2 *bp = netdev_priv(dev);
6815	u8 autoneg = bp->autoneg;
6816	u8 req_duplex = bp->req_duplex;
6817	u16 req_line_speed = bp->req_line_speed;
6818	u32 advertising = bp->advertising;
6819	int err = -EINVAL;
6820
6821	spin_lock_bh(&bp->phy_lock);
6822
6823	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6824		goto err_out_unlock;
6825
6826	if (cmd->port != bp->phy_port &&
6827	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6828		goto err_out_unlock;
6829
6830	/* If device is down, we can store the settings only if the user
6831	 * is setting the currently active port.
6832	 */
6833	if (!netif_running(dev) && cmd->port != bp->phy_port)
6834		goto err_out_unlock;
6835
6836	if (cmd->autoneg == AUTONEG_ENABLE) {
6837		autoneg |= AUTONEG_SPEED;
6838
6839		advertising = cmd->advertising;
6840		if (cmd->port == PORT_TP) {
6841			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6842			if (!advertising)
6843				advertising = ETHTOOL_ALL_COPPER_SPEED;
6844		} else {
6845			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6846			if (!advertising)
6847				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6848		}
6849		advertising |= ADVERTISED_Autoneg;
6850	}
6851	else {
6852		u32 speed = ethtool_cmd_speed(cmd);
6853		if (cmd->port == PORT_FIBRE) {
6854			if ((speed != SPEED_1000 &&
6855			     speed != SPEED_2500) ||
6856			    (cmd->duplex != DUPLEX_FULL))
6857				goto err_out_unlock;
6858
6859			if (speed == SPEED_2500 &&
6860			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6861				goto err_out_unlock;
6862		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6863			goto err_out_unlock;
6864
6865		autoneg &= ~AUTONEG_SPEED;
6866		req_line_speed = speed;
6867		req_duplex = cmd->duplex;
6868		advertising = 0;
6869	}
6870
6871	bp->autoneg = autoneg;
6872	bp->advertising = advertising;
6873	bp->req_line_speed = req_line_speed;
6874	bp->req_duplex = req_duplex;
6875
6876	err = 0;
6877	/* If device is down, the new settings will be picked up when it is
6878	 * brought up.
6879	 */
6880	if (netif_running(dev))
6881		err = bnx2_setup_phy(bp, cmd->port);
6882
6883err_out_unlock:
6884	spin_unlock_bh(&bp->phy_lock);
6885
6886	return err;
6887}
6888
6889static void
6890bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6891{
6892	struct bnx2 *bp = netdev_priv(dev);
6893
6894	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6895	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6896	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6897	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6898}
6899
6900#define BNX2_REGDUMP_LEN		(32 * 1024)
6901
6902static int
6903bnx2_get_regs_len(struct net_device *dev)
6904{
6905	return BNX2_REGDUMP_LEN;
6906}
6907
6908static void
6909bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6910{
6911	u32 *p = _p, i, offset;
6912	u8 *orig_p = _p;
6913	struct bnx2 *bp = netdev_priv(dev);
6914	static const u32 reg_boundaries[] = {
6915		0x0000, 0x0098, 0x0400, 0x045c,
6916		0x0800, 0x0880, 0x0c00, 0x0c10,
6917		0x0c30, 0x0d08, 0x1000, 0x101c,
6918		0x1040, 0x1048, 0x1080, 0x10a4,
6919		0x1400, 0x1490, 0x1498, 0x14f0,
6920		0x1500, 0x155c, 0x1580, 0x15dc,
6921		0x1600, 0x1658, 0x1680, 0x16d8,
6922		0x1800, 0x1820, 0x1840, 0x1854,
6923		0x1880, 0x1894, 0x1900, 0x1984,
6924		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6925		0x1c80, 0x1c94, 0x1d00, 0x1d84,
6926		0x2000, 0x2030, 0x23c0, 0x2400,
6927		0x2800, 0x2820, 0x2830, 0x2850,
6928		0x2b40, 0x2c10, 0x2fc0, 0x3058,
6929		0x3c00, 0x3c94, 0x4000, 0x4010,
6930		0x4080, 0x4090, 0x43c0, 0x4458,
6931		0x4c00, 0x4c18, 0x4c40, 0x4c54,
6932		0x4fc0, 0x5010, 0x53c0, 0x5444,
6933		0x5c00, 0x5c18, 0x5c80, 0x5c90,
6934		0x5fc0, 0x6000, 0x6400, 0x6428,
6935		0x6800, 0x6848, 0x684c, 0x6860,
6936		0x6888, 0x6910, 0x8000
6937	};
6938
6939	regs->version = 0;
6940
6941	memset(p, 0, BNX2_REGDUMP_LEN);
6942
6943	if (!netif_running(bp->dev))
6944		return;
6945
6946	i = 0;
6947	offset = reg_boundaries[0];
6948	p += offset;
6949	while (offset < BNX2_REGDUMP_LEN) {
6950		*p++ = REG_RD(bp, offset);
6951		offset += 4;
6952		if (offset == reg_boundaries[i + 1]) {
6953			offset = reg_boundaries[i + 2];
6954			p = (u32 *) (orig_p + offset);
6955			i += 2;
6956		}
6957	}
6958}
6959
6960static void
6961bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6962{
6963	struct bnx2 *bp = netdev_priv(dev);
6964
6965	if (bp->flags & BNX2_FLAG_NO_WOL) {
6966		wol->supported = 0;
6967		wol->wolopts = 0;
6968	}
6969	else {
6970		wol->supported = WAKE_MAGIC;
6971		if (bp->wol)
6972			wol->wolopts = WAKE_MAGIC;
6973		else
6974			wol->wolopts = 0;
6975	}
6976	memset(&wol->sopass, 0, sizeof(wol->sopass));
6977}
6978
6979static int
6980bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6981{
6982	struct bnx2 *bp = netdev_priv(dev);
6983
6984	if (wol->wolopts & ~WAKE_MAGIC)
6985		return -EINVAL;
6986
6987	if (wol->wolopts & WAKE_MAGIC) {
6988		if (bp->flags & BNX2_FLAG_NO_WOL)
6989			return -EINVAL;
6990
6991		bp->wol = 1;
6992	}
6993	else {
6994		bp->wol = 0;
6995	}
6996	return 0;
6997}
6998
6999static int
7000bnx2_nway_reset(struct net_device *dev)
7001{
7002	struct bnx2 *bp = netdev_priv(dev);
7003	u32 bmcr;
7004
7005	if (!netif_running(dev))
7006		return -EAGAIN;
7007
7008	if (!(bp->autoneg & AUTONEG_SPEED)) {
7009		return -EINVAL;
7010	}
7011
7012	spin_lock_bh(&bp->phy_lock);
7013
7014	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7015		int rc;
7016
7017		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7018		spin_unlock_bh(&bp->phy_lock);
7019		return rc;
7020	}
7021
7022	/* Force a link down visible on the other side */
7023	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7024		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7025		spin_unlock_bh(&bp->phy_lock);
7026
7027		msleep(20);
7028
7029		spin_lock_bh(&bp->phy_lock);
7030
7031		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7032		bp->serdes_an_pending = 1;
7033		mod_timer(&bp->timer, jiffies + bp->current_interval);
7034	}
7035
7036	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7037	bmcr &= ~BMCR_LOOPBACK;
7038	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7039
7040	spin_unlock_bh(&bp->phy_lock);
7041
7042	return 0;
7043}
7044
7045static u32
7046bnx2_get_link(struct net_device *dev)
7047{
7048	struct bnx2 *bp = netdev_priv(dev);
7049
7050	return bp->link_up;
7051}
7052
7053static int
7054bnx2_get_eeprom_len(struct net_device *dev)
7055{
7056	struct bnx2 *bp = netdev_priv(dev);
7057
7058	if (bp->flash_info == NULL)
7059		return 0;
7060
7061	return (int) bp->flash_size;
7062}
7063
7064static int
7065bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7066		u8 *eebuf)
7067{
7068	struct bnx2 *bp = netdev_priv(dev);
7069	int rc;
7070
7071	if (!netif_running(dev))
7072		return -EAGAIN;
7073
7074	/* parameters already validated in ethtool_get_eeprom */
7075
7076	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7077
7078	return rc;
7079}
7080
7081static int
7082bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7083		u8 *eebuf)
7084{
7085	struct bnx2 *bp = netdev_priv(dev);
7086	int rc;
7087
7088	if (!netif_running(dev))
7089		return -EAGAIN;
7090
7091	/* parameters already validated in ethtool_set_eeprom */
7092
7093	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7094
7095	return rc;
7096}
7097
7098static int
7099bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7100{
7101	struct bnx2 *bp = netdev_priv(dev);
7102
7103	memset(coal, 0, sizeof(struct ethtool_coalesce));
7104
7105	coal->rx_coalesce_usecs = bp->rx_ticks;
7106	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7107	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7108	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7109
7110	coal->tx_coalesce_usecs = bp->tx_ticks;
7111	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7112	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7113	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7114
7115	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7116
7117	return 0;
7118}
7119
7120static int
7121bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7122{
7123	struct bnx2 *bp = netdev_priv(dev);
7124
7125	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7126	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7127
7128	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7129	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7130
7131	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7132	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7133
7134	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7135	if (bp->rx_quick_cons_trip_int > 0xff)
7136		bp->rx_quick_cons_trip_int = 0xff;
7137
7138	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7139	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7140
7141	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7142	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7143
7144	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7145	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7146
7147	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7148	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7149		0xff;
7150
7151	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7152	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7153		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7154			bp->stats_ticks = USEC_PER_SEC;
7155	}
7156	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7157		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7158	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7159
7160	if (netif_running(bp->dev)) {
7161		bnx2_netif_stop(bp, true);
7162		bnx2_init_nic(bp, 0);
7163		bnx2_netif_start(bp, true);
7164	}
7165
7166	return 0;
7167}
7168
7169static void
7170bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7171{
7172	struct bnx2 *bp = netdev_priv(dev);
7173
7174	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7175	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7176
7177	ering->rx_pending = bp->rx_ring_size;
7178	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7179
7180	ering->tx_max_pending = MAX_TX_DESC_CNT;
7181	ering->tx_pending = bp->tx_ring_size;
7182}
7183
7184static int
7185bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7186{
7187	if (netif_running(bp->dev)) {
7188		/* Reset will erase chipset stats; save them */
7189		bnx2_save_stats(bp);
7190
7191		bnx2_netif_stop(bp, true);
7192		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7193		if (reset_irq) {
7194			bnx2_free_irq(bp);
7195			bnx2_del_napi(bp);
7196		} else {
7197			__bnx2_free_irq(bp);
7198		}
7199		bnx2_free_skbs(bp);
7200		bnx2_free_mem(bp);
7201	}
7202
7203	bnx2_set_rx_ring_size(bp, rx);
7204	bp->tx_ring_size = tx;
7205
7206	if (netif_running(bp->dev)) {
7207		int rc = 0;
7208
7209		if (reset_irq) {
7210			rc = bnx2_setup_int_mode(bp, disable_msi);
7211			bnx2_init_napi(bp);
7212		}
7213
7214		if (!rc)
7215			rc = bnx2_alloc_mem(bp);
7216
7217		if (!rc)
7218			rc = bnx2_request_irq(bp);
7219
7220		if (!rc)
7221			rc = bnx2_init_nic(bp, 0);
7222
7223		if (rc) {
7224			bnx2_napi_enable(bp);
7225			dev_close(bp->dev);
7226			return rc;
7227		}
7228#ifdef BCM_CNIC
7229		mutex_lock(&bp->cnic_lock);
7230		/* Let cnic know about the new status block. */
7231		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7232			bnx2_setup_cnic_irq_info(bp);
7233		mutex_unlock(&bp->cnic_lock);
7234#endif
7235		bnx2_netif_start(bp, true);
7236	}
7237	return 0;
7238}
7239
7240static int
7241bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7242{
7243	struct bnx2 *bp = netdev_priv(dev);
7244	int rc;
7245
7246	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7247		(ering->tx_pending > MAX_TX_DESC_CNT) ||
7248		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7249
7250		return -EINVAL;
7251	}
7252	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7253				   false);
7254	return rc;
7255}
7256
7257static void
7258bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7259{
7260	struct bnx2 *bp = netdev_priv(dev);
7261
7262	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7263	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7264	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7265}
7266
7267static int
7268bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7269{
7270	struct bnx2 *bp = netdev_priv(dev);
7271
7272	bp->req_flow_ctrl = 0;
7273	if (epause->rx_pause)
7274		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7275	if (epause->tx_pause)
7276		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7277
7278	if (epause->autoneg) {
7279		bp->autoneg |= AUTONEG_FLOW_CTRL;
7280	}
7281	else {
7282		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7283	}
7284
7285	if (netif_running(dev)) {
7286		spin_lock_bh(&bp->phy_lock);
7287		bnx2_setup_phy(bp, bp->phy_port);
7288		spin_unlock_bh(&bp->phy_lock);
7289	}
7290
7291	return 0;
7292}
7293
7294static struct {
7295	char string[ETH_GSTRING_LEN];
7296} bnx2_stats_str_arr[] = {
7297	{ "rx_bytes" },
7298	{ "rx_error_bytes" },
7299	{ "tx_bytes" },
7300	{ "tx_error_bytes" },
7301	{ "rx_ucast_packets" },
7302	{ "rx_mcast_packets" },
7303	{ "rx_bcast_packets" },
7304	{ "tx_ucast_packets" },
7305	{ "tx_mcast_packets" },
7306	{ "tx_bcast_packets" },
7307	{ "tx_mac_errors" },
7308	{ "tx_carrier_errors" },
7309	{ "rx_crc_errors" },
7310	{ "rx_align_errors" },
7311	{ "tx_single_collisions" },
7312	{ "tx_multi_collisions" },
7313	{ "tx_deferred" },
7314	{ "tx_excess_collisions" },
7315	{ "tx_late_collisions" },
7316	{ "tx_total_collisions" },
7317	{ "rx_fragments" },
7318	{ "rx_jabbers" },
7319	{ "rx_undersize_packets" },
7320	{ "rx_oversize_packets" },
7321	{ "rx_64_byte_packets" },
7322	{ "rx_65_to_127_byte_packets" },
7323	{ "rx_128_to_255_byte_packets" },
7324	{ "rx_256_to_511_byte_packets" },
7325	{ "rx_512_to_1023_byte_packets" },
7326	{ "rx_1024_to_1522_byte_packets" },
7327	{ "rx_1523_to_9022_byte_packets" },
7328	{ "tx_64_byte_packets" },
7329	{ "tx_65_to_127_byte_packets" },
7330	{ "tx_128_to_255_byte_packets" },
7331	{ "tx_256_to_511_byte_packets" },
7332	{ "tx_512_to_1023_byte_packets" },
7333	{ "tx_1024_to_1522_byte_packets" },
7334	{ "tx_1523_to_9022_byte_packets" },
7335	{ "rx_xon_frames" },
7336	{ "rx_xoff_frames" },
7337	{ "tx_xon_frames" },
7338	{ "tx_xoff_frames" },
7339	{ "rx_mac_ctrl_frames" },
7340	{ "rx_filtered_packets" },
7341	{ "rx_ftq_discards" },
7342	{ "rx_discards" },
7343	{ "rx_fw_discards" },
7344};
7345
7346#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7347			sizeof(bnx2_stats_str_arr[0]))
7348
7349#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7350
7351static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7352    STATS_OFFSET32(stat_IfHCInOctets_hi),
7353    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7354    STATS_OFFSET32(stat_IfHCOutOctets_hi),
7355    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7356    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7357    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7358    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7359    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7360    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7361    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7362    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7363    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7364    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7365    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7366    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7367    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7368    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7369    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7370    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7371    STATS_OFFSET32(stat_EtherStatsCollisions),
7372    STATS_OFFSET32(stat_EtherStatsFragments),
7373    STATS_OFFSET32(stat_EtherStatsJabbers),
7374    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7375    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7376    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7377    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7378    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7379    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7380    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7381    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7382    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7383    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7384    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7385    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7386    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7387    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7388    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7389    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7390    STATS_OFFSET32(stat_XonPauseFramesReceived),
7391    STATS_OFFSET32(stat_XoffPauseFramesReceived),
7392    STATS_OFFSET32(stat_OutXonSent),
7393    STATS_OFFSET32(stat_OutXoffSent),
7394    STATS_OFFSET32(stat_MacControlFramesReceived),
7395    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7396    STATS_OFFSET32(stat_IfInFTQDiscards),
7397    STATS_OFFSET32(stat_IfInMBUFDiscards),
7398    STATS_OFFSET32(stat_FwRxDrop),
7399};
7400
7401/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7402 * skipped because of errata.
7403 */
7404static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7405	8,0,8,8,8,8,8,8,8,8,
7406	4,0,4,4,4,4,4,4,4,4,
7407	4,4,4,4,4,4,4,4,4,4,
7408	4,4,4,4,4,4,4,4,4,4,
7409	4,4,4,4,4,4,4,
7410};
7411
7412static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7413	8,0,8,8,8,8,8,8,8,8,
7414	4,4,4,4,4,4,4,4,4,4,
7415	4,4,4,4,4,4,4,4,4,4,
7416	4,4,4,4,4,4,4,4,4,4,
7417	4,4,4,4,4,4,4,
7418};
7419
7420#define BNX2_NUM_TESTS 6
7421
7422static struct {
7423	char string[ETH_GSTRING_LEN];
7424} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7425	{ "register_test (offline)" },
7426	{ "memory_test (offline)" },
7427	{ "loopback_test (offline)" },
7428	{ "nvram_test (online)" },
7429	{ "interrupt_test (online)" },
7430	{ "link_test (online)" },
7431};
7432
7433static int
7434bnx2_get_sset_count(struct net_device *dev, int sset)
7435{
7436	switch (sset) {
7437	case ETH_SS_TEST:
7438		return BNX2_NUM_TESTS;
7439	case ETH_SS_STATS:
7440		return BNX2_NUM_STATS;
7441	default:
7442		return -EOPNOTSUPP;
7443	}
7444}
7445
7446static void
7447bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7448{
7449	struct bnx2 *bp = netdev_priv(dev);
7450
7451	bnx2_set_power_state(bp, PCI_D0);
7452
7453	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7454	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7455		int i;
7456
7457		bnx2_netif_stop(bp, true);
7458		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7459		bnx2_free_skbs(bp);
7460
7461		if (bnx2_test_registers(bp) != 0) {
7462			buf[0] = 1;
7463			etest->flags |= ETH_TEST_FL_FAILED;
7464		}
7465		if (bnx2_test_memory(bp) != 0) {
7466			buf[1] = 1;
7467			etest->flags |= ETH_TEST_FL_FAILED;
7468		}
7469		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7470			etest->flags |= ETH_TEST_FL_FAILED;
7471
7472		if (!netif_running(bp->dev))
7473			bnx2_shutdown_chip(bp);
7474		else {
7475			bnx2_init_nic(bp, 1);
7476			bnx2_netif_start(bp, true);
7477		}
7478
7479		/* wait for link up */
7480		for (i = 0; i < 7; i++) {
7481			if (bp->link_up)
7482				break;
7483			msleep_interruptible(1000);
7484		}
7485	}
7486
7487	if (bnx2_test_nvram(bp) != 0) {
7488		buf[3] = 1;
7489		etest->flags |= ETH_TEST_FL_FAILED;
7490	}
7491	if (bnx2_test_intr(bp) != 0) {
7492		buf[4] = 1;
7493		etest->flags |= ETH_TEST_FL_FAILED;
7494	}
7495
7496	if (bnx2_test_link(bp) != 0) {
7497		buf[5] = 1;
7498		etest->flags |= ETH_TEST_FL_FAILED;
7499
7500	}
7501	if (!netif_running(bp->dev))
7502		bnx2_set_power_state(bp, PCI_D3hot);
7503}
7504
7505static void
7506bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7507{
7508	switch (stringset) {
7509	case ETH_SS_STATS:
7510		memcpy(buf, bnx2_stats_str_arr,
7511			sizeof(bnx2_stats_str_arr));
7512		break;
7513	case ETH_SS_TEST:
7514		memcpy(buf, bnx2_tests_str_arr,
7515			sizeof(bnx2_tests_str_arr));
7516		break;
7517	}
7518}
7519
7520static void
7521bnx2_get_ethtool_stats(struct net_device *dev,
7522		struct ethtool_stats *stats, u64 *buf)
7523{
7524	struct bnx2 *bp = netdev_priv(dev);
7525	int i;
7526	u32 *hw_stats = (u32 *) bp->stats_blk;
7527	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7528	u8 *stats_len_arr = NULL;
7529
7530	if (hw_stats == NULL) {
7531		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7532		return;
7533	}
7534
7535	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7536	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7537	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7538	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
7539		stats_len_arr = bnx2_5706_stats_len_arr;
7540	else
7541		stats_len_arr = bnx2_5708_stats_len_arr;
7542
7543	for (i = 0; i < BNX2_NUM_STATS; i++) {
7544		unsigned long offset;
7545
7546		if (stats_len_arr[i] == 0) {
7547			/* skip this counter */
7548			buf[i] = 0;
7549			continue;
7550		}
7551
7552		offset = bnx2_stats_offset_arr[i];
7553		if (stats_len_arr[i] == 4) {
7554			/* 4-byte counter */
7555			buf[i] = (u64) *(hw_stats + offset) +
7556				 *(temp_stats + offset);
7557			continue;
7558		}
7559		/* 8-byte counter */
7560		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7561			 *(hw_stats + offset + 1) +
7562			 (((u64) *(temp_stats + offset)) << 32) +
7563			 *(temp_stats + offset + 1);
7564	}
7565}
7566
7567static int
7568bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7569{
7570	struct bnx2 *bp = netdev_priv(dev);
7571
7572	switch (state) {
7573	case ETHTOOL_ID_ACTIVE:
7574		bnx2_set_power_state(bp, PCI_D0);
7575
7576		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7577		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7578		return 1;	/* cycle on/off once per second */
7579
7580	case ETHTOOL_ID_ON:
7581		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7582		       BNX2_EMAC_LED_1000MB_OVERRIDE |
7583		       BNX2_EMAC_LED_100MB_OVERRIDE |
7584		       BNX2_EMAC_LED_10MB_OVERRIDE |
7585		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7586		       BNX2_EMAC_LED_TRAFFIC);
7587		break;
7588
7589	case ETHTOOL_ID_OFF:
7590		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7591		break;
7592
7593	case ETHTOOL_ID_INACTIVE:
7594		REG_WR(bp, BNX2_EMAC_LED, 0);
7595		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7596
7597		if (!netif_running(dev))
7598			bnx2_set_power_state(bp, PCI_D3hot);
7599		break;
7600	}
7601
7602	return 0;
7603}
7604
7605static netdev_features_t
7606bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7607{
7608	struct bnx2 *bp = netdev_priv(dev);
7609
7610	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7611		features |= NETIF_F_HW_VLAN_RX;
7612
7613	return features;
7614}
7615
7616static int
7617bnx2_set_features(struct net_device *dev, netdev_features_t features)
7618{
7619	struct bnx2 *bp = netdev_priv(dev);
7620
7621	/* TSO with VLAN tag won't work with current firmware */
7622	if (features & NETIF_F_HW_VLAN_TX)
7623		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7624	else
7625		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7626
7627	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7628	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7629	    netif_running(dev)) {
7630		bnx2_netif_stop(bp, false);
7631		dev->features = features;
7632		bnx2_set_rx_mode(dev);
7633		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7634		bnx2_netif_start(bp, false);
7635		return 1;
7636	}
7637
7638	return 0;
7639}
7640
7641static void bnx2_get_channels(struct net_device *dev,
7642			      struct ethtool_channels *channels)
7643{
7644	struct bnx2 *bp = netdev_priv(dev);
7645	u32 max_rx_rings = 1;
7646	u32 max_tx_rings = 1;
7647
7648	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7649		max_rx_rings = RX_MAX_RINGS;
7650		max_tx_rings = TX_MAX_RINGS;
7651	}
7652
7653	channels->max_rx = max_rx_rings;
7654	channels->max_tx = max_tx_rings;
7655	channels->max_other = 0;
7656	channels->max_combined = 0;
7657	channels->rx_count = bp->num_rx_rings;
7658	channels->tx_count = bp->num_tx_rings;
7659	channels->other_count = 0;
7660	channels->combined_count = 0;
7661}
7662
7663static int bnx2_set_channels(struct net_device *dev,
7664			      struct ethtool_channels *channels)
7665{
7666	struct bnx2 *bp = netdev_priv(dev);
7667	u32 max_rx_rings = 1;
7668	u32 max_tx_rings = 1;
7669	int rc = 0;
7670
7671	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7672		max_rx_rings = RX_MAX_RINGS;
7673		max_tx_rings = TX_MAX_RINGS;
7674	}
7675	if (channels->rx_count > max_rx_rings ||
7676	    channels->tx_count > max_tx_rings)
7677		return -EINVAL;
7678
7679	bp->num_req_rx_rings = channels->rx_count;
7680	bp->num_req_tx_rings = channels->tx_count;
7681
7682	if (netif_running(dev))
7683		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7684					   bp->tx_ring_size, true);
7685
7686	return rc;
7687}
7688
7689static const struct ethtool_ops bnx2_ethtool_ops = {
7690	.get_settings		= bnx2_get_settings,
7691	.set_settings		= bnx2_set_settings,
7692	.get_drvinfo		= bnx2_get_drvinfo,
7693	.get_regs_len		= bnx2_get_regs_len,
7694	.get_regs		= bnx2_get_regs,
7695	.get_wol		= bnx2_get_wol,
7696	.set_wol		= bnx2_set_wol,
7697	.nway_reset		= bnx2_nway_reset,
7698	.get_link		= bnx2_get_link,
7699	.get_eeprom_len		= bnx2_get_eeprom_len,
7700	.get_eeprom		= bnx2_get_eeprom,
7701	.set_eeprom		= bnx2_set_eeprom,
7702	.get_coalesce		= bnx2_get_coalesce,
7703	.set_coalesce		= bnx2_set_coalesce,
7704	.get_ringparam		= bnx2_get_ringparam,
7705	.set_ringparam		= bnx2_set_ringparam,
7706	.get_pauseparam		= bnx2_get_pauseparam,
7707	.set_pauseparam		= bnx2_set_pauseparam,
7708	.self_test		= bnx2_self_test,
7709	.get_strings		= bnx2_get_strings,
7710	.set_phys_id		= bnx2_set_phys_id,
7711	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7712	.get_sset_count		= bnx2_get_sset_count,
7713	.get_channels		= bnx2_get_channels,
7714	.set_channels		= bnx2_set_channels,
7715};
7716
7717/* Called with rtnl_lock */
7718static int
7719bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7720{
7721	struct mii_ioctl_data *data = if_mii(ifr);
7722	struct bnx2 *bp = netdev_priv(dev);
7723	int err;
7724
7725	switch(cmd) {
7726	case SIOCGMIIPHY:
7727		data->phy_id = bp->phy_addr;
7728
7729		/* fallthru */
7730	case SIOCGMIIREG: {
7731		u32 mii_regval;
7732
7733		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7734			return -EOPNOTSUPP;
7735
7736		if (!netif_running(dev))
7737			return -EAGAIN;
7738
7739		spin_lock_bh(&bp->phy_lock);
7740		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7741		spin_unlock_bh(&bp->phy_lock);
7742
7743		data->val_out = mii_regval;
7744
7745		return err;
7746	}
7747
7748	case SIOCSMIIREG:
7749		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7750			return -EOPNOTSUPP;
7751
7752		if (!netif_running(dev))
7753			return -EAGAIN;
7754
7755		spin_lock_bh(&bp->phy_lock);
7756		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7757		spin_unlock_bh(&bp->phy_lock);
7758
7759		return err;
7760
7761	default:
7762		/* do nothing */
7763		break;
7764	}
7765	return -EOPNOTSUPP;
7766}
7767
7768/* Called with rtnl_lock */
7769static int
7770bnx2_change_mac_addr(struct net_device *dev, void *p)
7771{
7772	struct sockaddr *addr = p;
7773	struct bnx2 *bp = netdev_priv(dev);
7774
7775	if (!is_valid_ether_addr(addr->sa_data))
7776		return -EADDRNOTAVAIL;
7777
7778	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7779	if (netif_running(dev))
7780		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7781
7782	return 0;
7783}
7784
7785/* Called with rtnl_lock */
7786static int
7787bnx2_change_mtu(struct net_device *dev, int new_mtu)
7788{
7789	struct bnx2 *bp = netdev_priv(dev);
7790
7791	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7792		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7793		return -EINVAL;
7794
7795	dev->mtu = new_mtu;
7796	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7797				     false);
7798}
7799
7800#ifdef CONFIG_NET_POLL_CONTROLLER
7801static void
7802poll_bnx2(struct net_device *dev)
7803{
7804	struct bnx2 *bp = netdev_priv(dev);
7805	int i;
7806
7807	for (i = 0; i < bp->irq_nvecs; i++) {
7808		struct bnx2_irq *irq = &bp->irq_tbl[i];
7809
7810		disable_irq(irq->vector);
7811		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7812		enable_irq(irq->vector);
7813	}
7814}
7815#endif
7816
7817static void __devinit
7818bnx2_get_5709_media(struct bnx2 *bp)
7819{
7820	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7821	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7822	u32 strap;
7823
7824	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7825		return;
7826	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7827		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7828		return;
7829	}
7830
7831	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7832		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7833	else
7834		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7835
7836	if (PCI_FUNC(bp->pdev->devfn) == 0) {
7837		switch (strap) {
7838		case 0x4:
7839		case 0x5:
7840		case 0x6:
7841			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7842			return;
7843		}
7844	} else {
7845		switch (strap) {
7846		case 0x1:
7847		case 0x2:
7848		case 0x4:
7849			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7850			return;
7851		}
7852	}
7853}
7854
7855static void __devinit
7856bnx2_get_pci_speed(struct bnx2 *bp)
7857{
7858	u32 reg;
7859
7860	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7861	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7862		u32 clkreg;
7863
7864		bp->flags |= BNX2_FLAG_PCIX;
7865
7866		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7867
7868		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7869		switch (clkreg) {
7870		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7871			bp->bus_speed_mhz = 133;
7872			break;
7873
7874		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7875			bp->bus_speed_mhz = 100;
7876			break;
7877
7878		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7879		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7880			bp->bus_speed_mhz = 66;
7881			break;
7882
7883		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7884		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7885			bp->bus_speed_mhz = 50;
7886			break;
7887
7888		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7889		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7890		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7891			bp->bus_speed_mhz = 33;
7892			break;
7893		}
7894	}
7895	else {
7896		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7897			bp->bus_speed_mhz = 66;
7898		else
7899			bp->bus_speed_mhz = 33;
7900	}
7901
7902	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7903		bp->flags |= BNX2_FLAG_PCI_32BIT;
7904
7905}
7906
7907static void __devinit
7908bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7909{
7910	int rc, i, j;
7911	u8 *data;
7912	unsigned int block_end, rosize, len;
7913
7914#define BNX2_VPD_NVRAM_OFFSET	0x300
7915#define BNX2_VPD_LEN		128
7916#define BNX2_MAX_VER_SLEN	30
7917
7918	data = kmalloc(256, GFP_KERNEL);
7919	if (!data)
7920		return;
7921
7922	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7923			     BNX2_VPD_LEN);
7924	if (rc)
7925		goto vpd_done;
7926
7927	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7928		data[i] = data[i + BNX2_VPD_LEN + 3];
7929		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7930		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7931		data[i + 3] = data[i + BNX2_VPD_LEN];
7932	}
7933
7934	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7935	if (i < 0)
7936		goto vpd_done;
7937
7938	rosize = pci_vpd_lrdt_size(&data[i]);
7939	i += PCI_VPD_LRDT_TAG_SIZE;
7940	block_end = i + rosize;
7941
7942	if (block_end > BNX2_VPD_LEN)
7943		goto vpd_done;
7944
7945	j = pci_vpd_find_info_keyword(data, i, rosize,
7946				      PCI_VPD_RO_KEYWORD_MFR_ID);
7947	if (j < 0)
7948		goto vpd_done;
7949
7950	len = pci_vpd_info_field_size(&data[j]);
7951
7952	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7953	if (j + len > block_end || len != 4 ||
7954	    memcmp(&data[j], "1028", 4))
7955		goto vpd_done;
7956
7957	j = pci_vpd_find_info_keyword(data, i, rosize,
7958				      PCI_VPD_RO_KEYWORD_VENDOR0);
7959	if (j < 0)
7960		goto vpd_done;
7961
7962	len = pci_vpd_info_field_size(&data[j]);
7963
7964	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7965	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7966		goto vpd_done;
7967
7968	memcpy(bp->fw_version, &data[j], len);
7969	bp->fw_version[len] = ' ';
7970
7971vpd_done:
7972	kfree(data);
7973}
7974
7975static int __devinit
7976bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7977{
7978	struct bnx2 *bp;
7979	unsigned long mem_len;
7980	int rc, i, j;
7981	u32 reg;
7982	u64 dma_mask, persist_dma_mask;
7983	int err;
7984
7985	SET_NETDEV_DEV(dev, &pdev->dev);
7986	bp = netdev_priv(dev);
7987
7988	bp->flags = 0;
7989	bp->phy_flags = 0;
7990
7991	bp->temp_stats_blk =
7992		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7993
7994	if (bp->temp_stats_blk == NULL) {
7995		rc = -ENOMEM;
7996		goto err_out;
7997	}
7998
7999	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8000	rc = pci_enable_device(pdev);
8001	if (rc) {
8002		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8003		goto err_out;
8004	}
8005
8006	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8007		dev_err(&pdev->dev,
8008			"Cannot find PCI device base address, aborting\n");
8009		rc = -ENODEV;
8010		goto err_out_disable;
8011	}
8012
8013	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8014	if (rc) {
8015		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8016		goto err_out_disable;
8017	}
8018
8019	pci_set_master(pdev);
8020
8021	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8022	if (bp->pm_cap == 0) {
8023		dev_err(&pdev->dev,
8024			"Cannot find power management capability, aborting\n");
8025		rc = -EIO;
8026		goto err_out_release;
8027	}
8028
8029	bp->dev = dev;
8030	bp->pdev = pdev;
8031
8032	spin_lock_init(&bp->phy_lock);
8033	spin_lock_init(&bp->indirect_lock);
8034#ifdef BCM_CNIC
8035	mutex_init(&bp->cnic_lock);
8036#endif
8037	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8038
8039	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
8040	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
8041	dev->mem_end = dev->mem_start + mem_len;
8042	dev->irq = pdev->irq;
8043
8044	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
8045
8046	if (!bp->regview) {
8047		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8048		rc = -ENOMEM;
8049		goto err_out_release;
8050	}
8051
8052	bnx2_set_power_state(bp, PCI_D0);
8053
8054	/* Configure byte swap and enable write to the reg_window registers.
8055	 * Rely on CPU to do target byte swapping on big endian systems
8056	 * The chip's target access swapping will not swap all accesses
8057	 */
8058	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8059		   BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8060		   BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8061
8062	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
8063
8064	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8065		if (!pci_is_pcie(pdev)) {
8066			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8067			rc = -EIO;
8068			goto err_out_unmap;
8069		}
8070		bp->flags |= BNX2_FLAG_PCIE;
8071		if (CHIP_REV(bp) == CHIP_REV_Ax)
8072			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8073
8074		/* AER (Advanced Error Reporting) hooks */
8075		err = pci_enable_pcie_error_reporting(pdev);
8076		if (!err)
8077			bp->flags |= BNX2_FLAG_AER_ENABLED;
8078
8079	} else {
8080		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8081		if (bp->pcix_cap == 0) {
8082			dev_err(&pdev->dev,
8083				"Cannot find PCIX capability, aborting\n");
8084			rc = -EIO;
8085			goto err_out_unmap;
8086		}
8087		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8088	}
8089
8090	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8091		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8092			bp->flags |= BNX2_FLAG_MSIX_CAP;
8093	}
8094
8095	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8096		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8097			bp->flags |= BNX2_FLAG_MSI_CAP;
8098	}
8099
8100	/* 5708 cannot support DMA addresses > 40-bit.  */
8101	if (CHIP_NUM(bp) == CHIP_NUM_5708)
8102		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8103	else
8104		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8105
8106	/* Configure DMA attributes. */
8107	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8108		dev->features |= NETIF_F_HIGHDMA;
8109		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8110		if (rc) {
8111			dev_err(&pdev->dev,
8112				"pci_set_consistent_dma_mask failed, aborting\n");
8113			goto err_out_unmap;
8114		}
8115	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8116		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8117		goto err_out_unmap;
8118	}
8119
8120	if (!(bp->flags & BNX2_FLAG_PCIE))
8121		bnx2_get_pci_speed(bp);
8122
8123	/* 5706A0 may falsely detect SERR and PERR. */
8124	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8125		reg = REG_RD(bp, PCI_COMMAND);
8126		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8127		REG_WR(bp, PCI_COMMAND, reg);
8128	}
8129	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8130		!(bp->flags & BNX2_FLAG_PCIX)) {
8131
8132		dev_err(&pdev->dev,
8133			"5706 A1 can only be used in a PCIX bus, aborting\n");
8134		goto err_out_unmap;
8135	}
8136
8137	bnx2_init_nvram(bp);
8138
8139	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8140
8141	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8142	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8143		u32 off = PCI_FUNC(pdev->devfn) << 2;
8144
8145		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8146	} else
8147		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8148
8149	/* Get the permanent MAC address.  First we need to make sure the
8150	 * firmware is actually running.
8151	 */
8152	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8153
8154	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8155	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8156		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8157		rc = -ENODEV;
8158		goto err_out_unmap;
8159	}
8160
8161	bnx2_read_vpd_fw_ver(bp);
8162
8163	j = strlen(bp->fw_version);
8164	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8165	for (i = 0; i < 3 && j < 24; i++) {
8166		u8 num, k, skip0;
8167
8168		if (i == 0) {
8169			bp->fw_version[j++] = 'b';
8170			bp->fw_version[j++] = 'c';
8171			bp->fw_version[j++] = ' ';
8172		}
8173		num = (u8) (reg >> (24 - (i * 8)));
8174		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8175			if (num >= k || !skip0 || k == 1) {
8176				bp->fw_version[j++] = (num / k) + '0';
8177				skip0 = 0;
8178			}
8179		}
8180		if (i != 2)
8181			bp->fw_version[j++] = '.';
8182	}
8183	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8184	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8185		bp->wol = 1;
8186
8187	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8188		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8189
8190		for (i = 0; i < 30; i++) {
8191			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8192			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8193				break;
8194			msleep(10);
8195		}
8196	}
8197	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8198	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8199	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8200	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8201		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8202
8203		if (j < 32)
8204			bp->fw_version[j++] = ' ';
8205		for (i = 0; i < 3 && j < 28; i++) {
8206			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8207			reg = be32_to_cpu(reg);
8208			memcpy(&bp->fw_version[j], &reg, 4);
8209			j += 4;
8210		}
8211	}
8212
8213	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8214	bp->mac_addr[0] = (u8) (reg >> 8);
8215	bp->mac_addr[1] = (u8) reg;
8216
8217	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8218	bp->mac_addr[2] = (u8) (reg >> 24);
8219	bp->mac_addr[3] = (u8) (reg >> 16);
8220	bp->mac_addr[4] = (u8) (reg >> 8);
8221	bp->mac_addr[5] = (u8) reg;
8222
8223	bp->tx_ring_size = MAX_TX_DESC_CNT;
8224	bnx2_set_rx_ring_size(bp, 255);
8225
8226	bp->tx_quick_cons_trip_int = 2;
8227	bp->tx_quick_cons_trip = 20;
8228	bp->tx_ticks_int = 18;
8229	bp->tx_ticks = 80;
8230
8231	bp->rx_quick_cons_trip_int = 2;
8232	bp->rx_quick_cons_trip = 12;
8233	bp->rx_ticks_int = 18;
8234	bp->rx_ticks = 18;
8235
8236	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8237
8238	bp->current_interval = BNX2_TIMER_INTERVAL;
8239
8240	bp->phy_addr = 1;
8241
8242	/* Disable WOL support if we are running on a SERDES chip. */
8243	if (CHIP_NUM(bp) == CHIP_NUM_5709)
8244		bnx2_get_5709_media(bp);
8245	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8246		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8247
8248	bp->phy_port = PORT_TP;
8249	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8250		bp->phy_port = PORT_FIBRE;
8251		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8252		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8253			bp->flags |= BNX2_FLAG_NO_WOL;
8254			bp->wol = 0;
8255		}
8256		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8257			/* Don't do parallel detect on this board because of
8258			 * some board problems.  The link will not go down
8259			 * if we do parallel detect.
8260			 */
8261			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8262			    pdev->subsystem_device == 0x310c)
8263				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8264		} else {
8265			bp->phy_addr = 2;
8266			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8267				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8268		}
8269	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8270		   CHIP_NUM(bp) == CHIP_NUM_5708)
8271		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8272	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8273		 (CHIP_REV(bp) == CHIP_REV_Ax ||
8274		  CHIP_REV(bp) == CHIP_REV_Bx))
8275		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8276
8277	bnx2_init_fw_cap(bp);
8278
8279	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8280	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8281	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8282	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8283		bp->flags |= BNX2_FLAG_NO_WOL;
8284		bp->wol = 0;
8285	}
8286
8287	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8288		bp->tx_quick_cons_trip_int =
8289			bp->tx_quick_cons_trip;
8290		bp->tx_ticks_int = bp->tx_ticks;
8291		bp->rx_quick_cons_trip_int =
8292			bp->rx_quick_cons_trip;
8293		bp->rx_ticks_int = bp->rx_ticks;
8294		bp->comp_prod_trip_int = bp->comp_prod_trip;
8295		bp->com_ticks_int = bp->com_ticks;
8296		bp->cmd_ticks_int = bp->cmd_ticks;
8297	}
8298
8299	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8300	 *
8301	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8302	 * with byte enables disabled on the unused 32-bit word.  This is legal
8303	 * but causes problems on the AMD 8132 which will eventually stop
8304	 * responding after a while.
8305	 *
8306	 * AMD believes this incompatibility is unique to the 5706, and
8307	 * prefers to locally disable MSI rather than globally disabling it.
8308	 */
8309	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8310		struct pci_dev *amd_8132 = NULL;
8311
8312		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8313						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8314						  amd_8132))) {
8315
8316			if (amd_8132->revision >= 0x10 &&
8317			    amd_8132->revision <= 0x13) {
8318				disable_msi = 1;
8319				pci_dev_put(amd_8132);
8320				break;
8321			}
8322		}
8323	}
8324
8325	bnx2_set_default_link(bp);
8326	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8327
8328	init_timer(&bp->timer);
8329	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8330	bp->timer.data = (unsigned long) bp;
8331	bp->timer.function = bnx2_timer;
8332
8333#ifdef BCM_CNIC
8334	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8335		bp->cnic_eth_dev.max_iscsi_conn =
8336			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8337			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8338#endif
8339	pci_save_state(pdev);
8340
8341	return 0;
8342
8343err_out_unmap:
8344	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8345		pci_disable_pcie_error_reporting(pdev);
8346		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8347	}
8348
8349	if (bp->regview) {
8350		iounmap(bp->regview);
8351		bp->regview = NULL;
8352	}
8353
8354err_out_release:
8355	pci_release_regions(pdev);
8356
8357err_out_disable:
8358	pci_disable_device(pdev);
8359	pci_set_drvdata(pdev, NULL);
8360
8361err_out:
8362	return rc;
8363}
8364
8365static char * __devinit
8366bnx2_bus_string(struct bnx2 *bp, char *str)
8367{
8368	char *s = str;
8369
8370	if (bp->flags & BNX2_FLAG_PCIE) {
8371		s += sprintf(s, "PCI Express");
8372	} else {
8373		s += sprintf(s, "PCI");
8374		if (bp->flags & BNX2_FLAG_PCIX)
8375			s += sprintf(s, "-X");
8376		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8377			s += sprintf(s, " 32-bit");
8378		else
8379			s += sprintf(s, " 64-bit");
8380		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8381	}
8382	return str;
8383}
8384
8385static void
8386bnx2_del_napi(struct bnx2 *bp)
8387{
8388	int i;
8389
8390	for (i = 0; i < bp->irq_nvecs; i++)
8391		netif_napi_del(&bp->bnx2_napi[i].napi);
8392}
8393
8394static void
8395bnx2_init_napi(struct bnx2 *bp)
8396{
8397	int i;
8398
8399	for (i = 0; i < bp->irq_nvecs; i++) {
8400		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8401		int (*poll)(struct napi_struct *, int);
8402
8403		if (i == 0)
8404			poll = bnx2_poll;
8405		else
8406			poll = bnx2_poll_msix;
8407
8408		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8409		bnapi->bp = bp;
8410	}
8411}
8412
8413static const struct net_device_ops bnx2_netdev_ops = {
8414	.ndo_open		= bnx2_open,
8415	.ndo_start_xmit		= bnx2_start_xmit,
8416	.ndo_stop		= bnx2_close,
8417	.ndo_get_stats64	= bnx2_get_stats64,
8418	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8419	.ndo_do_ioctl		= bnx2_ioctl,
8420	.ndo_validate_addr	= eth_validate_addr,
8421	.ndo_set_mac_address	= bnx2_change_mac_addr,
8422	.ndo_change_mtu		= bnx2_change_mtu,
8423	.ndo_fix_features	= bnx2_fix_features,
8424	.ndo_set_features	= bnx2_set_features,
8425	.ndo_tx_timeout		= bnx2_tx_timeout,
8426#ifdef CONFIG_NET_POLL_CONTROLLER
8427	.ndo_poll_controller	= poll_bnx2,
8428#endif
8429};
8430
8431static int __devinit
8432bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8433{
8434	static int version_printed = 0;
8435	struct net_device *dev = NULL;
8436	struct bnx2 *bp;
8437	int rc;
8438	char str[40];
8439
8440	if (version_printed++ == 0)
8441		pr_info("%s", version);
8442
8443	/* dev zeroed in init_etherdev */
8444	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8445
8446	if (!dev)
8447		return -ENOMEM;
8448
8449	rc = bnx2_init_board(pdev, dev);
8450	if (rc < 0) {
8451		free_netdev(dev);
8452		return rc;
8453	}
8454
8455	dev->netdev_ops = &bnx2_netdev_ops;
8456	dev->watchdog_timeo = TX_TIMEOUT;
8457	dev->ethtool_ops = &bnx2_ethtool_ops;
8458
8459	bp = netdev_priv(dev);
8460
8461	pci_set_drvdata(pdev, dev);
8462
8463	memcpy(dev->dev_addr, bp->mac_addr, 6);
8464	memcpy(dev->perm_addr, bp->mac_addr, 6);
8465
8466	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8467		NETIF_F_TSO | NETIF_F_TSO_ECN |
8468		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8469
8470	if (CHIP_NUM(bp) == CHIP_NUM_5709)
8471		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8472
8473	dev->vlan_features = dev->hw_features;
8474	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8475	dev->features |= dev->hw_features;
8476	dev->priv_flags |= IFF_UNICAST_FLT;
8477
8478	if ((rc = register_netdev(dev))) {
8479		dev_err(&pdev->dev, "Cannot register net device\n");
8480		goto error;
8481	}
8482
8483	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8484		    board_info[ent->driver_data].name,
8485		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8486		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
8487		    bnx2_bus_string(bp, str),
8488		    dev->base_addr,
8489		    bp->pdev->irq, dev->dev_addr);
8490
8491	return 0;
8492
8493error:
8494	if (bp->regview)
8495		iounmap(bp->regview);
8496	pci_release_regions(pdev);
8497	pci_disable_device(pdev);
8498	pci_set_drvdata(pdev, NULL);
8499	free_netdev(dev);
8500	return rc;
8501}
8502
8503static void __devexit
8504bnx2_remove_one(struct pci_dev *pdev)
8505{
8506	struct net_device *dev = pci_get_drvdata(pdev);
8507	struct bnx2 *bp = netdev_priv(dev);
8508
8509	unregister_netdev(dev);
8510
8511	del_timer_sync(&bp->timer);
8512	cancel_work_sync(&bp->reset_task);
8513
8514	if (bp->regview)
8515		iounmap(bp->regview);
8516
8517	kfree(bp->temp_stats_blk);
8518
8519	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8520		pci_disable_pcie_error_reporting(pdev);
8521		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8522	}
8523
8524	bnx2_release_firmware(bp);
8525
8526	free_netdev(dev);
8527
8528	pci_release_regions(pdev);
8529	pci_disable_device(pdev);
8530	pci_set_drvdata(pdev, NULL);
8531}
8532
8533static int
8534bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8535{
8536	struct net_device *dev = pci_get_drvdata(pdev);
8537	struct bnx2 *bp = netdev_priv(dev);
8538
8539	/* PCI register 4 needs to be saved whether netif_running() or not.
8540	 * MSI address and data need to be saved if using MSI and
8541	 * netif_running().
8542	 */
8543	pci_save_state(pdev);
8544	if (!netif_running(dev))
8545		return 0;
8546
8547	cancel_work_sync(&bp->reset_task);
8548	bnx2_netif_stop(bp, true);
8549	netif_device_detach(dev);
8550	del_timer_sync(&bp->timer);
8551	bnx2_shutdown_chip(bp);
8552	bnx2_free_skbs(bp);
8553	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8554	return 0;
8555}
8556
8557static int
8558bnx2_resume(struct pci_dev *pdev)
8559{
8560	struct net_device *dev = pci_get_drvdata(pdev);
8561	struct bnx2 *bp = netdev_priv(dev);
8562
8563	pci_restore_state(pdev);
8564	if (!netif_running(dev))
8565		return 0;
8566
8567	bnx2_set_power_state(bp, PCI_D0);
8568	netif_device_attach(dev);
8569	bnx2_init_nic(bp, 1);
8570	bnx2_netif_start(bp, true);
8571	return 0;
8572}
8573
8574/**
8575 * bnx2_io_error_detected - called when PCI error is detected
8576 * @pdev: Pointer to PCI device
8577 * @state: The current pci connection state
8578 *
8579 * This function is called after a PCI bus error affecting
8580 * this device has been detected.
8581 */
8582static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8583					       pci_channel_state_t state)
8584{
8585	struct net_device *dev = pci_get_drvdata(pdev);
8586	struct bnx2 *bp = netdev_priv(dev);
8587
8588	rtnl_lock();
8589	netif_device_detach(dev);
8590
8591	if (state == pci_channel_io_perm_failure) {
8592		rtnl_unlock();
8593		return PCI_ERS_RESULT_DISCONNECT;
8594	}
8595
8596	if (netif_running(dev)) {
8597		bnx2_netif_stop(bp, true);
8598		del_timer_sync(&bp->timer);
8599		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8600	}
8601
8602	pci_disable_device(pdev);
8603	rtnl_unlock();
8604
8605	/* Request a slot slot reset. */
8606	return PCI_ERS_RESULT_NEED_RESET;
8607}
8608
8609/**
8610 * bnx2_io_slot_reset - called after the pci bus has been reset.
8611 * @pdev: Pointer to PCI device
8612 *
8613 * Restart the card from scratch, as if from a cold-boot.
8614 */
8615static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8616{
8617	struct net_device *dev = pci_get_drvdata(pdev);
8618	struct bnx2 *bp = netdev_priv(dev);
8619	pci_ers_result_t result;
8620	int err;
8621
8622	rtnl_lock();
8623	if (pci_enable_device(pdev)) {
8624		dev_err(&pdev->dev,
8625			"Cannot re-enable PCI device after reset\n");
8626		result = PCI_ERS_RESULT_DISCONNECT;
8627	} else {
8628		pci_set_master(pdev);
8629		pci_restore_state(pdev);
8630		pci_save_state(pdev);
8631
8632		if (netif_running(dev)) {
8633			bnx2_set_power_state(bp, PCI_D0);
8634			bnx2_init_nic(bp, 1);
8635		}
8636		result = PCI_ERS_RESULT_RECOVERED;
8637	}
8638	rtnl_unlock();
8639
8640	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8641		return result;
8642
8643	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8644	if (err) {
8645		dev_err(&pdev->dev,
8646			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8647			 err); /* non-fatal, continue */
8648	}
8649
8650	return result;
8651}
8652
8653/**
8654 * bnx2_io_resume - called when traffic can start flowing again.
8655 * @pdev: Pointer to PCI device
8656 *
8657 * This callback is called when the error recovery driver tells us that
8658 * its OK to resume normal operation.
8659 */
8660static void bnx2_io_resume(struct pci_dev *pdev)
8661{
8662	struct net_device *dev = pci_get_drvdata(pdev);
8663	struct bnx2 *bp = netdev_priv(dev);
8664
8665	rtnl_lock();
8666	if (netif_running(dev))
8667		bnx2_netif_start(bp, true);
8668
8669	netif_device_attach(dev);
8670	rtnl_unlock();
8671}
8672
8673static struct pci_error_handlers bnx2_err_handler = {
8674	.error_detected	= bnx2_io_error_detected,
8675	.slot_reset	= bnx2_io_slot_reset,
8676	.resume		= bnx2_io_resume,
8677};
8678
8679static struct pci_driver bnx2_pci_driver = {
8680	.name		= DRV_MODULE_NAME,
8681	.id_table	= bnx2_pci_tbl,
8682	.probe		= bnx2_init_one,
8683	.remove		= __devexit_p(bnx2_remove_one),
8684	.suspend	= bnx2_suspend,
8685	.resume		= bnx2_resume,
8686	.err_handler	= &bnx2_err_handler,
8687};
8688
8689static int __init bnx2_init(void)
8690{
8691	return pci_register_driver(&bnx2_pci_driver);
8692}
8693
8694static void __exit bnx2_cleanup(void)
8695{
8696	pci_unregister_driver(&bnx2_pci_driver);
8697}
8698
8699module_init(bnx2_init);
8700module_exit(bnx2_cleanup);
8701
8702
8703
8704