1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
8 *
9 * Firmware is:
10 *	Derived from proprietary unpublished source code,
11 *	Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 *	Permission is hereby granted for the distribution of this firmware
14 *	data in hexadecimal or equivalent format, provided this copyright
15 *	notice is accompanying it.
16 */
17
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/stringify.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/in.h>
28#include <linux/init.h>
29#include <linux/interrupt.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/ethtool.h>
36#include <linux/mdio.h>
37#include <linux/mii.h>
38#include <linux/phy.h>
39#include <linux/brcmphy.h>
40#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
44#include <linux/prefetch.h>
45#include <linux/dma-mapping.h>
46#include <linux/firmware.h>
47
48#include <net/checksum.h>
49#include <net/ip.h>
50
51#include <linux/io.h>
52#include <asm/byteorder.h>
53#include <linux/uaccess.h>
54
55#ifdef CONFIG_SPARC
56#include <asm/idprom.h>
57#include <asm/prom.h>
58#endif
59
60#define BAR_0	0
61#define BAR_2	2
62
63#include "tg3.h"
64
65/* Functions & macros to verify TG3_FLAGS types */
66
67static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
68{
69	return test_bit(flag, bits);
70}
71
72static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
73{
74	set_bit(flag, bits);
75}
76
77static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
78{
79	clear_bit(flag, bits);
80}
81
82#define tg3_flag(tp, flag)				\
83	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84#define tg3_flag_set(tp, flag)				\
85	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86#define tg3_flag_clear(tp, flag)			\
87	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
89#define DRV_MODULE_NAME		"tg3"
90#define TG3_MAJ_NUM			3
91#define TG3_MIN_NUM			123
92#define DRV_MODULE_VERSION	\
93	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94#define DRV_MODULE_RELDATE	"March 21, 2012"
95
96#define RESET_KIND_SHUTDOWN	0
97#define RESET_KIND_INIT		1
98#define RESET_KIND_SUSPEND	2
99
100#define TG3_DEF_RX_MODE		0
101#define TG3_DEF_TX_MODE		0
102#define TG3_DEF_MSG_ENABLE	  \
103	(NETIF_MSG_DRV		| \
104	 NETIF_MSG_PROBE	| \
105	 NETIF_MSG_LINK		| \
106	 NETIF_MSG_TIMER	| \
107	 NETIF_MSG_IFDOWN	| \
108	 NETIF_MSG_IFUP		| \
109	 NETIF_MSG_RX_ERR	| \
110	 NETIF_MSG_TX_ERR)
111
112#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
113
114/* length of time before we decide the hardware is borked,
115 * and dev->tx_timeout() should be called to fix the problem
116 */
117
118#define TG3_TX_TIMEOUT			(5 * HZ)
119
120/* hardware minimum and maximum for a single frame's data payload */
121#define TG3_MIN_MTU			60
122#define TG3_MAX_MTU(tp)	\
123	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
124
125/* These numbers seem to be hard coded in the NIC firmware somehow.
126 * You can't change the ring sizes, but you can change where you place
127 * them in the NIC onboard memory.
128 */
129#define TG3_RX_STD_RING_SIZE(tp) \
130	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132#define TG3_DEF_RX_RING_PENDING		200
133#define TG3_RX_JMB_RING_SIZE(tp) \
134	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136#define TG3_DEF_RX_JUMBO_RING_PENDING	100
137
138/* Do not place this n-ring entries value into the tp struct itself,
139 * we really want to expose these constants to GCC so that modulo et
140 * al.  operations are done with shifts and masks instead of with
141 * hw multiply/modulo instructions.  Another solution would be to
142 * replace things like '% foo' with '& (foo - 1)'.
143 */
144
145#define TG3_TX_RING_SIZE		512
146#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
147
148#define TG3_RX_STD_RING_BYTES(tp) \
149	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150#define TG3_RX_JMB_RING_BYTES(tp) \
151	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152#define TG3_RX_RCB_RING_BYTES(tp) \
153	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
155				 TG3_TX_RING_SIZE)
156#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
157
158#define TG3_DMA_BYTE_ENAB		64
159
160#define TG3_RX_STD_DMA_SZ		1536
161#define TG3_RX_JMB_DMA_SZ		9046
162
163#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
164
165#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
167
168#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
170
171#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
173
174/* Due to a hardware bug, the 5701 can only DMA to memory addresses
175 * that are at least dword aligned when used in PCIX mode.  The driver
176 * works around this bug by double copying the packet.  This workaround
177 * is built into the normal double copy length check for efficiency.
178 *
179 * However, the double copy is only necessary on those architectures
180 * where unaligned memory accesses are inefficient.  For those architectures
181 * where unaligned memory accesses incur little penalty, we can reintegrate
182 * the 5701 in the normal rx path.  Doing so saves a device structure
183 * dereference by hardcoding the double copy threshold in place.
184 */
185#define TG3_RX_COPY_THRESHOLD		256
186#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
188#else
189	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
190#endif
191
192#if (NET_IP_ALIGN != 0)
193#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
194#else
195#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
196#endif
197
198/* minimum number of free TX descriptors required to wake up TX process */
199#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
200#define TG3_TX_BD_DMA_MAX_2K		2048
201#define TG3_TX_BD_DMA_MAX_4K		4096
202
203#define TG3_RAW_IP_ALIGN 2
204
205#define TG3_FW_UPDATE_TIMEOUT_SEC	5
206#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
207
208#define FIRMWARE_TG3		"tigon/tg3.bin"
209#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
210#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
211
212static char version[] __devinitdata =
213	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217MODULE_LICENSE("GPL");
218MODULE_VERSION(DRV_MODULE_VERSION);
219MODULE_FIRMWARE(FIRMWARE_TG3);
220MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
223static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
224module_param(tg3_debug, int, 0);
225MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
227static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
309	{}
310};
311
312MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
314static const struct {
315	const char string[ETH_GSTRING_LEN];
316} ethtool_stats_keys[] = {
317	{ "rx_octets" },
318	{ "rx_fragments" },
319	{ "rx_ucast_packets" },
320	{ "rx_mcast_packets" },
321	{ "rx_bcast_packets" },
322	{ "rx_fcs_errors" },
323	{ "rx_align_errors" },
324	{ "rx_xon_pause_rcvd" },
325	{ "rx_xoff_pause_rcvd" },
326	{ "rx_mac_ctrl_rcvd" },
327	{ "rx_xoff_entered" },
328	{ "rx_frame_too_long_errors" },
329	{ "rx_jabbers" },
330	{ "rx_undersize_packets" },
331	{ "rx_in_length_errors" },
332	{ "rx_out_length_errors" },
333	{ "rx_64_or_less_octet_packets" },
334	{ "rx_65_to_127_octet_packets" },
335	{ "rx_128_to_255_octet_packets" },
336	{ "rx_256_to_511_octet_packets" },
337	{ "rx_512_to_1023_octet_packets" },
338	{ "rx_1024_to_1522_octet_packets" },
339	{ "rx_1523_to_2047_octet_packets" },
340	{ "rx_2048_to_4095_octet_packets" },
341	{ "rx_4096_to_8191_octet_packets" },
342	{ "rx_8192_to_9022_octet_packets" },
343
344	{ "tx_octets" },
345	{ "tx_collisions" },
346
347	{ "tx_xon_sent" },
348	{ "tx_xoff_sent" },
349	{ "tx_flow_control" },
350	{ "tx_mac_errors" },
351	{ "tx_single_collisions" },
352	{ "tx_mult_collisions" },
353	{ "tx_deferred" },
354	{ "tx_excessive_collisions" },
355	{ "tx_late_collisions" },
356	{ "tx_collide_2times" },
357	{ "tx_collide_3times" },
358	{ "tx_collide_4times" },
359	{ "tx_collide_5times" },
360	{ "tx_collide_6times" },
361	{ "tx_collide_7times" },
362	{ "tx_collide_8times" },
363	{ "tx_collide_9times" },
364	{ "tx_collide_10times" },
365	{ "tx_collide_11times" },
366	{ "tx_collide_12times" },
367	{ "tx_collide_13times" },
368	{ "tx_collide_14times" },
369	{ "tx_collide_15times" },
370	{ "tx_ucast_packets" },
371	{ "tx_mcast_packets" },
372	{ "tx_bcast_packets" },
373	{ "tx_carrier_sense_errors" },
374	{ "tx_discards" },
375	{ "tx_errors" },
376
377	{ "dma_writeq_full" },
378	{ "dma_write_prioq_full" },
379	{ "rxbds_empty" },
380	{ "rx_discards" },
381	{ "rx_errors" },
382	{ "rx_threshold_hit" },
383
384	{ "dma_readq_full" },
385	{ "dma_read_prioq_full" },
386	{ "tx_comp_queue_full" },
387
388	{ "ring_set_send_prod_index" },
389	{ "ring_status_update" },
390	{ "nic_irqs" },
391	{ "nic_avoided_irqs" },
392	{ "nic_tx_threshold_hit" },
393
394	{ "mbuf_lwm_thresh_hit" },
395};
396
397#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
398
399
400static const struct {
401	const char string[ETH_GSTRING_LEN];
402} ethtool_test_keys[] = {
403	{ "nvram test        (online) " },
404	{ "link test         (online) " },
405	{ "register test     (offline)" },
406	{ "memory test       (offline)" },
407	{ "mac loopback test (offline)" },
408	{ "phy loopback test (offline)" },
409	{ "ext loopback test (offline)" },
410	{ "interrupt test    (offline)" },
411};
412
413#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
414
415
416static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417{
418	writel(val, tp->regs + off);
419}
420
421static u32 tg3_read32(struct tg3 *tp, u32 off)
422{
423	return readl(tp->regs + off);
424}
425
426static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427{
428	writel(val, tp->aperegs + off);
429}
430
431static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432{
433	return readl(tp->aperegs + off);
434}
435
436static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
437{
438	unsigned long flags;
439
440	spin_lock_irqsave(&tp->indirect_lock, flags);
441	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443	spin_unlock_irqrestore(&tp->indirect_lock, flags);
444}
445
446static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
447{
448	writel(val, tp->regs + off);
449	readl(tp->regs + off);
450}
451
452static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453{
454	unsigned long flags;
455	u32 val;
456
457	spin_lock_irqsave(&tp->indirect_lock, flags);
458	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460	spin_unlock_irqrestore(&tp->indirect_lock, flags);
461	return val;
462}
463
464static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
465{
466	unsigned long flags;
467
468	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470				       TG3_64BIT_REG_LOW, val);
471		return;
472	}
473	if (off == TG3_RX_STD_PROD_IDX_REG) {
474		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475				       TG3_64BIT_REG_LOW, val);
476		return;
477	}
478
479	spin_lock_irqsave(&tp->indirect_lock, flags);
480	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482	spin_unlock_irqrestore(&tp->indirect_lock, flags);
483
484	/* In indirect mode when disabling interrupts, we also need
485	 * to clear the interrupt bit in the GRC local ctrl register.
486	 */
487	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
488	    (val == 0x1)) {
489		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
491	}
492}
493
494static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495{
496	unsigned long flags;
497	u32 val;
498
499	spin_lock_irqsave(&tp->indirect_lock, flags);
500	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502	spin_unlock_irqrestore(&tp->indirect_lock, flags);
503	return val;
504}
505
506/* usec_wait specifies the wait time in usec when writing to certain registers
507 * where it is unsafe to read back the register without some delay.
508 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510 */
511static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512{
513	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514		/* Non-posted methods */
515		tp->write32(tp, off, val);
516	else {
517		/* Posted method */
518		tg3_write32(tp, off, val);
519		if (usec_wait)
520			udelay(usec_wait);
521		tp->read32(tp, off);
522	}
523	/* Wait again after the read for the posted method to guarantee that
524	 * the wait time is met.
525	 */
526	if (usec_wait)
527		udelay(usec_wait);
528}
529
530static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
531{
532	tp->write32_mbox(tp, off, val);
533	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534		tp->read32_mbox(tp, off);
535}
536
537static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
538{
539	void __iomem *mbox = tp->regs + off;
540	writel(val, mbox);
541	if (tg3_flag(tp, TXD_MBOX_HWBUG))
542		writel(val, mbox);
543	if (tg3_flag(tp, MBOX_WRITE_REORDER))
544		readl(mbox);
545}
546
547static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548{
549	return readl(tp->regs + off + GRCMBOX_BASE);
550}
551
552static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553{
554	writel(val, tp->regs + off + GRCMBOX_BASE);
555}
556
557#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
558#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
559#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
560#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
561#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
562
563#define tw32(reg, val)			tp->write32(tp, reg, val)
564#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
565#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
566#define tr32(reg)			tp->read32(tp, reg)
567
568static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
569{
570	unsigned long flags;
571
572	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
574		return;
575
576	spin_lock_irqsave(&tp->indirect_lock, flags);
577	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
580
581		/* Always leave this as zero. */
582		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
583	} else {
584		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585		tw32_f(TG3PCI_MEM_WIN_DATA, val);
586
587		/* Always leave this as zero. */
588		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
589	}
590	spin_unlock_irqrestore(&tp->indirect_lock, flags);
591}
592
593static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
594{
595	unsigned long flags;
596
597	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
599		*val = 0;
600		return;
601	}
602
603	spin_lock_irqsave(&tp->indirect_lock, flags);
604	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
607
608		/* Always leave this as zero. */
609		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
610	} else {
611		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612		*val = tr32(TG3PCI_MEM_WIN_DATA);
613
614		/* Always leave this as zero. */
615		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
616	}
617	spin_unlock_irqrestore(&tp->indirect_lock, flags);
618}
619
620static void tg3_ape_lock_init(struct tg3 *tp)
621{
622	int i;
623	u32 regbase, bit;
624
625	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626		regbase = TG3_APE_LOCK_GRANT;
627	else
628		regbase = TG3_APE_PER_LOCK_GRANT;
629
630	/* Make sure the driver hasn't any stale locks. */
631	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632		switch (i) {
633		case TG3_APE_LOCK_PHY0:
634		case TG3_APE_LOCK_PHY1:
635		case TG3_APE_LOCK_PHY2:
636		case TG3_APE_LOCK_PHY3:
637			bit = APE_LOCK_GRANT_DRIVER;
638			break;
639		default:
640			if (!tp->pci_fn)
641				bit = APE_LOCK_GRANT_DRIVER;
642			else
643				bit = 1 << tp->pci_fn;
644		}
645		tg3_ape_write32(tp, regbase + 4 * i, bit);
646	}
647
648}
649
650static int tg3_ape_lock(struct tg3 *tp, int locknum)
651{
652	int i, off;
653	int ret = 0;
654	u32 status, req, gnt, bit;
655
656	if (!tg3_flag(tp, ENABLE_APE))
657		return 0;
658
659	switch (locknum) {
660	case TG3_APE_LOCK_GPIO:
661		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
662			return 0;
663	case TG3_APE_LOCK_GRC:
664	case TG3_APE_LOCK_MEM:
665		if (!tp->pci_fn)
666			bit = APE_LOCK_REQ_DRIVER;
667		else
668			bit = 1 << tp->pci_fn;
669		break;
670	default:
671		return -EINVAL;
672	}
673
674	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675		req = TG3_APE_LOCK_REQ;
676		gnt = TG3_APE_LOCK_GRANT;
677	} else {
678		req = TG3_APE_PER_LOCK_REQ;
679		gnt = TG3_APE_PER_LOCK_GRANT;
680	}
681
682	off = 4 * locknum;
683
684	tg3_ape_write32(tp, req + off, bit);
685
686	/* Wait for up to 1 millisecond to acquire lock. */
687	for (i = 0; i < 100; i++) {
688		status = tg3_ape_read32(tp, gnt + off);
689		if (status == bit)
690			break;
691		udelay(10);
692	}
693
694	if (status != bit) {
695		/* Revoke the lock request. */
696		tg3_ape_write32(tp, gnt + off, bit);
697		ret = -EBUSY;
698	}
699
700	return ret;
701}
702
703static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704{
705	u32 gnt, bit;
706
707	if (!tg3_flag(tp, ENABLE_APE))
708		return;
709
710	switch (locknum) {
711	case TG3_APE_LOCK_GPIO:
712		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
713			return;
714	case TG3_APE_LOCK_GRC:
715	case TG3_APE_LOCK_MEM:
716		if (!tp->pci_fn)
717			bit = APE_LOCK_GRANT_DRIVER;
718		else
719			bit = 1 << tp->pci_fn;
720		break;
721	default:
722		return;
723	}
724
725	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726		gnt = TG3_APE_LOCK_GRANT;
727	else
728		gnt = TG3_APE_PER_LOCK_GRANT;
729
730	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731}
732
733static void tg3_ape_send_event(struct tg3 *tp, u32 event)
734{
735	int i;
736	u32 apedata;
737
738	/* NCSI does not support APE events */
739	if (tg3_flag(tp, APE_HAS_NCSI))
740		return;
741
742	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743	if (apedata != APE_SEG_SIG_MAGIC)
744		return;
745
746	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747	if (!(apedata & APE_FW_STATUS_READY))
748		return;
749
750	/* Wait for up to 1 millisecond for APE to service previous event. */
751	for (i = 0; i < 10; i++) {
752		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
753			return;
754
755		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
756
757		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759					event | APE_EVENT_STATUS_EVENT_PENDING);
760
761		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
762
763		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
764			break;
765
766		udelay(100);
767	}
768
769	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
771}
772
773static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
774{
775	u32 event;
776	u32 apedata;
777
778	if (!tg3_flag(tp, ENABLE_APE))
779		return;
780
781	switch (kind) {
782	case RESET_KIND_INIT:
783		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784				APE_HOST_SEG_SIG_MAGIC);
785		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786				APE_HOST_SEG_LEN_MAGIC);
787		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792				APE_HOST_BEHAV_NO_PHYLOCK);
793		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794				    TG3_APE_HOST_DRVR_STATE_START);
795
796		event = APE_EVENT_STATUS_STATE_START;
797		break;
798	case RESET_KIND_SHUTDOWN:
799		/* With the interface we are currently using,
800		 * APE does not track driver state.  Wiping
801		 * out the HOST SEGMENT SIGNATURE forces
802		 * the APE to assume OS absent status.
803		 */
804		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
805
806		if (device_may_wakeup(&tp->pdev->dev) &&
807		    tg3_flag(tp, WOL_ENABLE)) {
808			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809					    TG3_APE_HOST_WOL_SPEED_AUTO);
810			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
811		} else
812			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
813
814		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
815
816		event = APE_EVENT_STATUS_STATE_UNLOAD;
817		break;
818	case RESET_KIND_SUSPEND:
819		event = APE_EVENT_STATUS_STATE_SUSPEND;
820		break;
821	default:
822		return;
823	}
824
825	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
826
827	tg3_ape_send_event(tp, event);
828}
829
830static void tg3_disable_ints(struct tg3 *tp)
831{
832	int i;
833
834	tw32(TG3PCI_MISC_HOST_CTRL,
835	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836	for (i = 0; i < tp->irq_max; i++)
837		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838}
839
840static void tg3_enable_ints(struct tg3 *tp)
841{
842	int i;
843
844	tp->irq_sync = 0;
845	wmb();
846
847	tw32(TG3PCI_MISC_HOST_CTRL,
848	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
849
850	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851	for (i = 0; i < tp->irq_cnt; i++) {
852		struct tg3_napi *tnapi = &tp->napi[i];
853
854		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855		if (tg3_flag(tp, 1SHOT_MSI))
856			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
857
858		tp->coal_now |= tnapi->coal_now;
859	}
860
861	/* Force an initial interrupt */
862	if (!tg3_flag(tp, TAGGED_STATUS) &&
863	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
865	else
866		tw32(HOSTCC_MODE, tp->coal_now);
867
868	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
869}
870
871static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872{
873	struct tg3 *tp = tnapi->tp;
874	struct tg3_hw_status *sblk = tnapi->hw_status;
875	unsigned int work_exists = 0;
876
877	/* check for phy events */
878	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879		if (sblk->status & SD_STATUS_LINK_CHG)
880			work_exists = 1;
881	}
882
883	/* check for TX work to do */
884	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
885		work_exists = 1;
886
887	/* check for RX work to do */
888	if (tnapi->rx_rcb_prod_idx &&
889	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
890		work_exists = 1;
891
892	return work_exists;
893}
894
895/* tg3_int_reenable
896 *  similar to tg3_enable_ints, but it accurately determines whether there
897 *  is new work pending and can return without flushing the PIO write
898 *  which reenables interrupts
899 */
900static void tg3_int_reenable(struct tg3_napi *tnapi)
901{
902	struct tg3 *tp = tnapi->tp;
903
904	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
905	mmiowb();
906
907	/* When doing tagged status, this work check is unnecessary.
908	 * The last_tag we write above tells the chip which piece of
909	 * work we've completed.
910	 */
911	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
912		tw32(HOSTCC_MODE, tp->coalesce_mode |
913		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
914}
915
916static void tg3_switch_clocks(struct tg3 *tp)
917{
918	u32 clock_ctrl;
919	u32 orig_clock_ctrl;
920
921	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
922		return;
923
924	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
925
926	orig_clock_ctrl = clock_ctrl;
927	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
928		       CLOCK_CTRL_CLKRUN_OENABLE |
929		       0x1f);
930	tp->pci_clock_ctrl = clock_ctrl;
931
932	if (tg3_flag(tp, 5705_PLUS)) {
933		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
934			tw32_wait_f(TG3PCI_CLOCK_CTRL,
935				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
936		}
937	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
938		tw32_wait_f(TG3PCI_CLOCK_CTRL,
939			    clock_ctrl |
940			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
941			    40);
942		tw32_wait_f(TG3PCI_CLOCK_CTRL,
943			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
944			    40);
945	}
946	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
947}
948
949#define PHY_BUSY_LOOPS	5000
950
951static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
952{
953	u32 frame_val;
954	unsigned int loops;
955	int ret;
956
957	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
958		tw32_f(MAC_MI_MODE,
959		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
960		udelay(80);
961	}
962
963	*val = 0x0;
964
965	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
966		      MI_COM_PHY_ADDR_MASK);
967	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
968		      MI_COM_REG_ADDR_MASK);
969	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
970
971	tw32_f(MAC_MI_COM, frame_val);
972
973	loops = PHY_BUSY_LOOPS;
974	while (loops != 0) {
975		udelay(10);
976		frame_val = tr32(MAC_MI_COM);
977
978		if ((frame_val & MI_COM_BUSY) == 0) {
979			udelay(5);
980			frame_val = tr32(MAC_MI_COM);
981			break;
982		}
983		loops -= 1;
984	}
985
986	ret = -EBUSY;
987	if (loops != 0) {
988		*val = frame_val & MI_COM_DATA_MASK;
989		ret = 0;
990	}
991
992	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
993		tw32_f(MAC_MI_MODE, tp->mi_mode);
994		udelay(80);
995	}
996
997	return ret;
998}
999
1000static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1001{
1002	u32 frame_val;
1003	unsigned int loops;
1004	int ret;
1005
1006	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1007	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1008		return 0;
1009
1010	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1011		tw32_f(MAC_MI_MODE,
1012		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1013		udelay(80);
1014	}
1015
1016	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1017		      MI_COM_PHY_ADDR_MASK);
1018	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1019		      MI_COM_REG_ADDR_MASK);
1020	frame_val |= (val & MI_COM_DATA_MASK);
1021	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1022
1023	tw32_f(MAC_MI_COM, frame_val);
1024
1025	loops = PHY_BUSY_LOOPS;
1026	while (loops != 0) {
1027		udelay(10);
1028		frame_val = tr32(MAC_MI_COM);
1029		if ((frame_val & MI_COM_BUSY) == 0) {
1030			udelay(5);
1031			frame_val = tr32(MAC_MI_COM);
1032			break;
1033		}
1034		loops -= 1;
1035	}
1036
1037	ret = -EBUSY;
1038	if (loops != 0)
1039		ret = 0;
1040
1041	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1042		tw32_f(MAC_MI_MODE, tp->mi_mode);
1043		udelay(80);
1044	}
1045
1046	return ret;
1047}
1048
1049static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1050{
1051	int err;
1052
1053	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1054	if (err)
1055		goto done;
1056
1057	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1058	if (err)
1059		goto done;
1060
1061	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1062			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1063	if (err)
1064		goto done;
1065
1066	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1067
1068done:
1069	return err;
1070}
1071
1072static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1073{
1074	int err;
1075
1076	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1077	if (err)
1078		goto done;
1079
1080	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1081	if (err)
1082		goto done;
1083
1084	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1085			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1086	if (err)
1087		goto done;
1088
1089	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1090
1091done:
1092	return err;
1093}
1094
1095static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1096{
1097	int err;
1098
1099	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1100	if (!err)
1101		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1102
1103	return err;
1104}
1105
1106static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1107{
1108	int err;
1109
1110	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1111	if (!err)
1112		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1113
1114	return err;
1115}
1116
1117static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1118{
1119	int err;
1120
1121	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1122			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1123			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1124	if (!err)
1125		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1126
1127	return err;
1128}
1129
1130static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1131{
1132	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1133		set |= MII_TG3_AUXCTL_MISC_WREN;
1134
1135	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1136}
1137
1138#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1139	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1140			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1141			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1142
1143#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1144	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1145			     MII_TG3_AUXCTL_ACTL_TX_6DB);
1146
1147static int tg3_bmcr_reset(struct tg3 *tp)
1148{
1149	u32 phy_control;
1150	int limit, err;
1151
1152	/* OK, reset it, and poll the BMCR_RESET bit until it
1153	 * clears or we time out.
1154	 */
1155	phy_control = BMCR_RESET;
1156	err = tg3_writephy(tp, MII_BMCR, phy_control);
1157	if (err != 0)
1158		return -EBUSY;
1159
1160	limit = 5000;
1161	while (limit--) {
1162		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1163		if (err != 0)
1164			return -EBUSY;
1165
1166		if ((phy_control & BMCR_RESET) == 0) {
1167			udelay(40);
1168			break;
1169		}
1170		udelay(10);
1171	}
1172	if (limit < 0)
1173		return -EBUSY;
1174
1175	return 0;
1176}
1177
1178static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1179{
1180	struct tg3 *tp = bp->priv;
1181	u32 val;
1182
1183	spin_lock_bh(&tp->lock);
1184
1185	if (tg3_readphy(tp, reg, &val))
1186		val = -EIO;
1187
1188	spin_unlock_bh(&tp->lock);
1189
1190	return val;
1191}
1192
1193static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1194{
1195	struct tg3 *tp = bp->priv;
1196	u32 ret = 0;
1197
1198	spin_lock_bh(&tp->lock);
1199
1200	if (tg3_writephy(tp, reg, val))
1201		ret = -EIO;
1202
1203	spin_unlock_bh(&tp->lock);
1204
1205	return ret;
1206}
1207
1208static int tg3_mdio_reset(struct mii_bus *bp)
1209{
1210	return 0;
1211}
1212
1213static void tg3_mdio_config_5785(struct tg3 *tp)
1214{
1215	u32 val;
1216	struct phy_device *phydev;
1217
1218	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1219	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1220	case PHY_ID_BCM50610:
1221	case PHY_ID_BCM50610M:
1222		val = MAC_PHYCFG2_50610_LED_MODES;
1223		break;
1224	case PHY_ID_BCMAC131:
1225		val = MAC_PHYCFG2_AC131_LED_MODES;
1226		break;
1227	case PHY_ID_RTL8211C:
1228		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1229		break;
1230	case PHY_ID_RTL8201E:
1231		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1232		break;
1233	default:
1234		return;
1235	}
1236
1237	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1238		tw32(MAC_PHYCFG2, val);
1239
1240		val = tr32(MAC_PHYCFG1);
1241		val &= ~(MAC_PHYCFG1_RGMII_INT |
1242			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1243		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1244		tw32(MAC_PHYCFG1, val);
1245
1246		return;
1247	}
1248
1249	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1250		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1251		       MAC_PHYCFG2_FMODE_MASK_MASK |
1252		       MAC_PHYCFG2_GMODE_MASK_MASK |
1253		       MAC_PHYCFG2_ACT_MASK_MASK   |
1254		       MAC_PHYCFG2_QUAL_MASK_MASK |
1255		       MAC_PHYCFG2_INBAND_ENABLE;
1256
1257	tw32(MAC_PHYCFG2, val);
1258
1259	val = tr32(MAC_PHYCFG1);
1260	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1261		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1262	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1263		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1264			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1265		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1266			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1267	}
1268	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1269	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1270	tw32(MAC_PHYCFG1, val);
1271
1272	val = tr32(MAC_EXT_RGMII_MODE);
1273	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1274		 MAC_RGMII_MODE_RX_QUALITY |
1275		 MAC_RGMII_MODE_RX_ACTIVITY |
1276		 MAC_RGMII_MODE_RX_ENG_DET |
1277		 MAC_RGMII_MODE_TX_ENABLE |
1278		 MAC_RGMII_MODE_TX_LOWPWR |
1279		 MAC_RGMII_MODE_TX_RESET);
1280	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1281		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1282			val |= MAC_RGMII_MODE_RX_INT_B |
1283			       MAC_RGMII_MODE_RX_QUALITY |
1284			       MAC_RGMII_MODE_RX_ACTIVITY |
1285			       MAC_RGMII_MODE_RX_ENG_DET;
1286		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1287			val |= MAC_RGMII_MODE_TX_ENABLE |
1288			       MAC_RGMII_MODE_TX_LOWPWR |
1289			       MAC_RGMII_MODE_TX_RESET;
1290	}
1291	tw32(MAC_EXT_RGMII_MODE, val);
1292}
1293
1294static void tg3_mdio_start(struct tg3 *tp)
1295{
1296	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1297	tw32_f(MAC_MI_MODE, tp->mi_mode);
1298	udelay(80);
1299
1300	if (tg3_flag(tp, MDIOBUS_INITED) &&
1301	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1302		tg3_mdio_config_5785(tp);
1303}
1304
1305static int tg3_mdio_init(struct tg3 *tp)
1306{
1307	int i;
1308	u32 reg;
1309	struct phy_device *phydev;
1310
1311	if (tg3_flag(tp, 5717_PLUS)) {
1312		u32 is_serdes;
1313
1314		tp->phy_addr = tp->pci_fn + 1;
1315
1316		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1317			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1318		else
1319			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1320				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1321		if (is_serdes)
1322			tp->phy_addr += 7;
1323	} else
1324		tp->phy_addr = TG3_PHY_MII_ADDR;
1325
1326	tg3_mdio_start(tp);
1327
1328	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1329		return 0;
1330
1331	tp->mdio_bus = mdiobus_alloc();
1332	if (tp->mdio_bus == NULL)
1333		return -ENOMEM;
1334
1335	tp->mdio_bus->name     = "tg3 mdio bus";
1336	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1337		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1338	tp->mdio_bus->priv     = tp;
1339	tp->mdio_bus->parent   = &tp->pdev->dev;
1340	tp->mdio_bus->read     = &tg3_mdio_read;
1341	tp->mdio_bus->write    = &tg3_mdio_write;
1342	tp->mdio_bus->reset    = &tg3_mdio_reset;
1343	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1344	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1345
1346	for (i = 0; i < PHY_MAX_ADDR; i++)
1347		tp->mdio_bus->irq[i] = PHY_POLL;
1348
1349	/* The bus registration will look for all the PHYs on the mdio bus.
1350	 * Unfortunately, it does not ensure the PHY is powered up before
1351	 * accessing the PHY ID registers.  A chip reset is the
1352	 * quickest way to bring the device back to an operational state..
1353	 */
1354	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1355		tg3_bmcr_reset(tp);
1356
1357	i = mdiobus_register(tp->mdio_bus);
1358	if (i) {
1359		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1360		mdiobus_free(tp->mdio_bus);
1361		return i;
1362	}
1363
1364	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1365
1366	if (!phydev || !phydev->drv) {
1367		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1368		mdiobus_unregister(tp->mdio_bus);
1369		mdiobus_free(tp->mdio_bus);
1370		return -ENODEV;
1371	}
1372
1373	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1374	case PHY_ID_BCM57780:
1375		phydev->interface = PHY_INTERFACE_MODE_GMII;
1376		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1377		break;
1378	case PHY_ID_BCM50610:
1379	case PHY_ID_BCM50610M:
1380		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1381				     PHY_BRCM_RX_REFCLK_UNUSED |
1382				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1383				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1384		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1385			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1386		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1387			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1388		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1389			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1390		/* fallthru */
1391	case PHY_ID_RTL8211C:
1392		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1393		break;
1394	case PHY_ID_RTL8201E:
1395	case PHY_ID_BCMAC131:
1396		phydev->interface = PHY_INTERFACE_MODE_MII;
1397		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1398		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1399		break;
1400	}
1401
1402	tg3_flag_set(tp, MDIOBUS_INITED);
1403
1404	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1405		tg3_mdio_config_5785(tp);
1406
1407	return 0;
1408}
1409
1410static void tg3_mdio_fini(struct tg3 *tp)
1411{
1412	if (tg3_flag(tp, MDIOBUS_INITED)) {
1413		tg3_flag_clear(tp, MDIOBUS_INITED);
1414		mdiobus_unregister(tp->mdio_bus);
1415		mdiobus_free(tp->mdio_bus);
1416	}
1417}
1418
1419/* tp->lock is held. */
1420static inline void tg3_generate_fw_event(struct tg3 *tp)
1421{
1422	u32 val;
1423
1424	val = tr32(GRC_RX_CPU_EVENT);
1425	val |= GRC_RX_CPU_DRIVER_EVENT;
1426	tw32_f(GRC_RX_CPU_EVENT, val);
1427
1428	tp->last_event_jiffies = jiffies;
1429}
1430
1431#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1432
1433/* tp->lock is held. */
1434static void tg3_wait_for_event_ack(struct tg3 *tp)
1435{
1436	int i;
1437	unsigned int delay_cnt;
1438	long time_remain;
1439
1440	/* If enough time has passed, no wait is necessary. */
1441	time_remain = (long)(tp->last_event_jiffies + 1 +
1442		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1443		      (long)jiffies;
1444	if (time_remain < 0)
1445		return;
1446
1447	/* Check if we can shorten the wait time. */
1448	delay_cnt = jiffies_to_usecs(time_remain);
1449	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1450		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1451	delay_cnt = (delay_cnt >> 3) + 1;
1452
1453	for (i = 0; i < delay_cnt; i++) {
1454		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1455			break;
1456		udelay(8);
1457	}
1458}
1459
1460/* tp->lock is held. */
1461static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1462{
1463	u32 reg, val;
1464
1465	val = 0;
1466	if (!tg3_readphy(tp, MII_BMCR, &reg))
1467		val = reg << 16;
1468	if (!tg3_readphy(tp, MII_BMSR, &reg))
1469		val |= (reg & 0xffff);
1470	*data++ = val;
1471
1472	val = 0;
1473	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1474		val = reg << 16;
1475	if (!tg3_readphy(tp, MII_LPA, &reg))
1476		val |= (reg & 0xffff);
1477	*data++ = val;
1478
1479	val = 0;
1480	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1481		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1482			val = reg << 16;
1483		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1484			val |= (reg & 0xffff);
1485	}
1486	*data++ = val;
1487
1488	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1489		val = reg << 16;
1490	else
1491		val = 0;
1492	*data++ = val;
1493}
1494
1495/* tp->lock is held. */
1496static void tg3_ump_link_report(struct tg3 *tp)
1497{
1498	u32 data[4];
1499
1500	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1501		return;
1502
1503	tg3_phy_gather_ump_data(tp, data);
1504
1505	tg3_wait_for_event_ack(tp);
1506
1507	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1508	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1509	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1510	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1511	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1512	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1513
1514	tg3_generate_fw_event(tp);
1515}
1516
1517/* tp->lock is held. */
1518static void tg3_stop_fw(struct tg3 *tp)
1519{
1520	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1521		/* Wait for RX cpu to ACK the previous event. */
1522		tg3_wait_for_event_ack(tp);
1523
1524		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1525
1526		tg3_generate_fw_event(tp);
1527
1528		/* Wait for RX cpu to ACK this event. */
1529		tg3_wait_for_event_ack(tp);
1530	}
1531}
1532
1533/* tp->lock is held. */
1534static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1535{
1536	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1537		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1538
1539	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1540		switch (kind) {
1541		case RESET_KIND_INIT:
1542			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1543				      DRV_STATE_START);
1544			break;
1545
1546		case RESET_KIND_SHUTDOWN:
1547			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1548				      DRV_STATE_UNLOAD);
1549			break;
1550
1551		case RESET_KIND_SUSPEND:
1552			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1553				      DRV_STATE_SUSPEND);
1554			break;
1555
1556		default:
1557			break;
1558		}
1559	}
1560
1561	if (kind == RESET_KIND_INIT ||
1562	    kind == RESET_KIND_SUSPEND)
1563		tg3_ape_driver_state_change(tp, kind);
1564}
1565
1566/* tp->lock is held. */
1567static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1568{
1569	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1570		switch (kind) {
1571		case RESET_KIND_INIT:
1572			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573				      DRV_STATE_START_DONE);
1574			break;
1575
1576		case RESET_KIND_SHUTDOWN:
1577			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1578				      DRV_STATE_UNLOAD_DONE);
1579			break;
1580
1581		default:
1582			break;
1583		}
1584	}
1585
1586	if (kind == RESET_KIND_SHUTDOWN)
1587		tg3_ape_driver_state_change(tp, kind);
1588}
1589
1590/* tp->lock is held. */
1591static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1592{
1593	if (tg3_flag(tp, ENABLE_ASF)) {
1594		switch (kind) {
1595		case RESET_KIND_INIT:
1596			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1597				      DRV_STATE_START);
1598			break;
1599
1600		case RESET_KIND_SHUTDOWN:
1601			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1602				      DRV_STATE_UNLOAD);
1603			break;
1604
1605		case RESET_KIND_SUSPEND:
1606			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1607				      DRV_STATE_SUSPEND);
1608			break;
1609
1610		default:
1611			break;
1612		}
1613	}
1614}
1615
1616static int tg3_poll_fw(struct tg3 *tp)
1617{
1618	int i;
1619	u32 val;
1620
1621	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1622		/* Wait up to 20ms for init done. */
1623		for (i = 0; i < 200; i++) {
1624			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1625				return 0;
1626			udelay(100);
1627		}
1628		return -ENODEV;
1629	}
1630
1631	/* Wait for firmware initialization to complete. */
1632	for (i = 0; i < 100000; i++) {
1633		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1634		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1635			break;
1636		udelay(10);
1637	}
1638
1639	/* Chip might not be fitted with firmware.  Some Sun onboard
1640	 * parts are configured like that.  So don't signal the timeout
1641	 * of the above loop as an error, but do report the lack of
1642	 * running firmware once.
1643	 */
1644	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1645		tg3_flag_set(tp, NO_FWARE_REPORTED);
1646
1647		netdev_info(tp->dev, "No firmware running\n");
1648	}
1649
1650	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1651		/* The 57765 A0 needs a little more
1652		 * time to do some important work.
1653		 */
1654		mdelay(10);
1655	}
1656
1657	return 0;
1658}
1659
1660static void tg3_link_report(struct tg3 *tp)
1661{
1662	if (!netif_carrier_ok(tp->dev)) {
1663		netif_info(tp, link, tp->dev, "Link is down\n");
1664		tg3_ump_link_report(tp);
1665	} else if (netif_msg_link(tp)) {
1666		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1667			    (tp->link_config.active_speed == SPEED_1000 ?
1668			     1000 :
1669			     (tp->link_config.active_speed == SPEED_100 ?
1670			      100 : 10)),
1671			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1672			     "full" : "half"));
1673
1674		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1675			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1676			    "on" : "off",
1677			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1678			    "on" : "off");
1679
1680		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1681			netdev_info(tp->dev, "EEE is %s\n",
1682				    tp->setlpicnt ? "enabled" : "disabled");
1683
1684		tg3_ump_link_report(tp);
1685	}
1686}
1687
1688static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1689{
1690	u16 miireg;
1691
1692	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1693		miireg = ADVERTISE_1000XPAUSE;
1694	else if (flow_ctrl & FLOW_CTRL_TX)
1695		miireg = ADVERTISE_1000XPSE_ASYM;
1696	else if (flow_ctrl & FLOW_CTRL_RX)
1697		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1698	else
1699		miireg = 0;
1700
1701	return miireg;
1702}
1703
1704static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1705{
1706	u8 cap = 0;
1707
1708	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1709		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1710	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1711		if (lcladv & ADVERTISE_1000XPAUSE)
1712			cap = FLOW_CTRL_RX;
1713		if (rmtadv & ADVERTISE_1000XPAUSE)
1714			cap = FLOW_CTRL_TX;
1715	}
1716
1717	return cap;
1718}
1719
1720static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1721{
1722	u8 autoneg;
1723	u8 flowctrl = 0;
1724	u32 old_rx_mode = tp->rx_mode;
1725	u32 old_tx_mode = tp->tx_mode;
1726
1727	if (tg3_flag(tp, USE_PHYLIB))
1728		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1729	else
1730		autoneg = tp->link_config.autoneg;
1731
1732	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1733		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1734			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1735		else
1736			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1737	} else
1738		flowctrl = tp->link_config.flowctrl;
1739
1740	tp->link_config.active_flowctrl = flowctrl;
1741
1742	if (flowctrl & FLOW_CTRL_RX)
1743		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1744	else
1745		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1746
1747	if (old_rx_mode != tp->rx_mode)
1748		tw32_f(MAC_RX_MODE, tp->rx_mode);
1749
1750	if (flowctrl & FLOW_CTRL_TX)
1751		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1752	else
1753		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1754
1755	if (old_tx_mode != tp->tx_mode)
1756		tw32_f(MAC_TX_MODE, tp->tx_mode);
1757}
1758
1759static void tg3_adjust_link(struct net_device *dev)
1760{
1761	u8 oldflowctrl, linkmesg = 0;
1762	u32 mac_mode, lcl_adv, rmt_adv;
1763	struct tg3 *tp = netdev_priv(dev);
1764	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1765
1766	spin_lock_bh(&tp->lock);
1767
1768	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1769				    MAC_MODE_HALF_DUPLEX);
1770
1771	oldflowctrl = tp->link_config.active_flowctrl;
1772
1773	if (phydev->link) {
1774		lcl_adv = 0;
1775		rmt_adv = 0;
1776
1777		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1778			mac_mode |= MAC_MODE_PORT_MODE_MII;
1779		else if (phydev->speed == SPEED_1000 ||
1780			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1781			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1782		else
1783			mac_mode |= MAC_MODE_PORT_MODE_MII;
1784
1785		if (phydev->duplex == DUPLEX_HALF)
1786			mac_mode |= MAC_MODE_HALF_DUPLEX;
1787		else {
1788			lcl_adv = mii_advertise_flowctrl(
1789				  tp->link_config.flowctrl);
1790
1791			if (phydev->pause)
1792				rmt_adv = LPA_PAUSE_CAP;
1793			if (phydev->asym_pause)
1794				rmt_adv |= LPA_PAUSE_ASYM;
1795		}
1796
1797		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1798	} else
1799		mac_mode |= MAC_MODE_PORT_MODE_GMII;
1800
1801	if (mac_mode != tp->mac_mode) {
1802		tp->mac_mode = mac_mode;
1803		tw32_f(MAC_MODE, tp->mac_mode);
1804		udelay(40);
1805	}
1806
1807	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1808		if (phydev->speed == SPEED_10)
1809			tw32(MAC_MI_STAT,
1810			     MAC_MI_STAT_10MBPS_MODE |
1811			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1812		else
1813			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1814	}
1815
1816	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1817		tw32(MAC_TX_LENGTHS,
1818		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819		      (6 << TX_LENGTHS_IPG_SHIFT) |
1820		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1821	else
1822		tw32(MAC_TX_LENGTHS,
1823		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1824		      (6 << TX_LENGTHS_IPG_SHIFT) |
1825		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1826
1827	if (phydev->link != tp->old_link ||
1828	    phydev->speed != tp->link_config.active_speed ||
1829	    phydev->duplex != tp->link_config.active_duplex ||
1830	    oldflowctrl != tp->link_config.active_flowctrl)
1831		linkmesg = 1;
1832
1833	tp->old_link = phydev->link;
1834	tp->link_config.active_speed = phydev->speed;
1835	tp->link_config.active_duplex = phydev->duplex;
1836
1837	spin_unlock_bh(&tp->lock);
1838
1839	if (linkmesg)
1840		tg3_link_report(tp);
1841}
1842
1843static int tg3_phy_init(struct tg3 *tp)
1844{
1845	struct phy_device *phydev;
1846
1847	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1848		return 0;
1849
1850	/* Bring the PHY back to a known state. */
1851	tg3_bmcr_reset(tp);
1852
1853	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1854
1855	/* Attach the MAC to the PHY. */
1856	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1857			     phydev->dev_flags, phydev->interface);
1858	if (IS_ERR(phydev)) {
1859		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1860		return PTR_ERR(phydev);
1861	}
1862
1863	/* Mask with MAC supported features. */
1864	switch (phydev->interface) {
1865	case PHY_INTERFACE_MODE_GMII:
1866	case PHY_INTERFACE_MODE_RGMII:
1867		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1868			phydev->supported &= (PHY_GBIT_FEATURES |
1869					      SUPPORTED_Pause |
1870					      SUPPORTED_Asym_Pause);
1871			break;
1872		}
1873		/* fallthru */
1874	case PHY_INTERFACE_MODE_MII:
1875		phydev->supported &= (PHY_BASIC_FEATURES |
1876				      SUPPORTED_Pause |
1877				      SUPPORTED_Asym_Pause);
1878		break;
1879	default:
1880		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1881		return -EINVAL;
1882	}
1883
1884	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1885
1886	phydev->advertising = phydev->supported;
1887
1888	return 0;
1889}
1890
1891static void tg3_phy_start(struct tg3 *tp)
1892{
1893	struct phy_device *phydev;
1894
1895	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1896		return;
1897
1898	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1899
1900	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1901		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1902		phydev->speed = tp->link_config.speed;
1903		phydev->duplex = tp->link_config.duplex;
1904		phydev->autoneg = tp->link_config.autoneg;
1905		phydev->advertising = tp->link_config.advertising;
1906	}
1907
1908	phy_start(phydev);
1909
1910	phy_start_aneg(phydev);
1911}
1912
1913static void tg3_phy_stop(struct tg3 *tp)
1914{
1915	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1916		return;
1917
1918	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1919}
1920
1921static void tg3_phy_fini(struct tg3 *tp)
1922{
1923	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1924		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1925		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1926	}
1927}
1928
1929static int tg3_phy_set_extloopbk(struct tg3 *tp)
1930{
1931	int err;
1932	u32 val;
1933
1934	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1935		return 0;
1936
1937	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1938		/* Cannot do read-modify-write on 5401 */
1939		err = tg3_phy_auxctl_write(tp,
1940					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1941					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1942					   0x4c20);
1943		goto done;
1944	}
1945
1946	err = tg3_phy_auxctl_read(tp,
1947				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1948	if (err)
1949		return err;
1950
1951	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1952	err = tg3_phy_auxctl_write(tp,
1953				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1954
1955done:
1956	return err;
1957}
1958
1959static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1960{
1961	u32 phytest;
1962
1963	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1964		u32 phy;
1965
1966		tg3_writephy(tp, MII_TG3_FET_TEST,
1967			     phytest | MII_TG3_FET_SHADOW_EN);
1968		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1969			if (enable)
1970				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1971			else
1972				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1973			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1974		}
1975		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1976	}
1977}
1978
1979static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1980{
1981	u32 reg;
1982
1983	if (!tg3_flag(tp, 5705_PLUS) ||
1984	    (tg3_flag(tp, 5717_PLUS) &&
1985	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1986		return;
1987
1988	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1989		tg3_phy_fet_toggle_apd(tp, enable);
1990		return;
1991	}
1992
1993	reg = MII_TG3_MISC_SHDW_WREN |
1994	      MII_TG3_MISC_SHDW_SCR5_SEL |
1995	      MII_TG3_MISC_SHDW_SCR5_LPED |
1996	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1997	      MII_TG3_MISC_SHDW_SCR5_SDTL |
1998	      MII_TG3_MISC_SHDW_SCR5_C125OE;
1999	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2000		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2001
2002	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2003
2004
2005	reg = MII_TG3_MISC_SHDW_WREN |
2006	      MII_TG3_MISC_SHDW_APD_SEL |
2007	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2008	if (enable)
2009		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2010
2011	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2012}
2013
2014static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2015{
2016	u32 phy;
2017
2018	if (!tg3_flag(tp, 5705_PLUS) ||
2019	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2020		return;
2021
2022	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2023		u32 ephy;
2024
2025		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2026			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2027
2028			tg3_writephy(tp, MII_TG3_FET_TEST,
2029				     ephy | MII_TG3_FET_SHADOW_EN);
2030			if (!tg3_readphy(tp, reg, &phy)) {
2031				if (enable)
2032					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2033				else
2034					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2035				tg3_writephy(tp, reg, phy);
2036			}
2037			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2038		}
2039	} else {
2040		int ret;
2041
2042		ret = tg3_phy_auxctl_read(tp,
2043					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2044		if (!ret) {
2045			if (enable)
2046				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2047			else
2048				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2049			tg3_phy_auxctl_write(tp,
2050					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2051		}
2052	}
2053}
2054
2055static void tg3_phy_set_wirespeed(struct tg3 *tp)
2056{
2057	int ret;
2058	u32 val;
2059
2060	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2061		return;
2062
2063	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2064	if (!ret)
2065		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2066				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2067}
2068
2069static void tg3_phy_apply_otp(struct tg3 *tp)
2070{
2071	u32 otp, phy;
2072
2073	if (!tp->phy_otp)
2074		return;
2075
2076	otp = tp->phy_otp;
2077
2078	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2079		return;
2080
2081	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2082	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2083	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2084
2085	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2086	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2087	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2088
2089	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2090	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2091	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2092
2093	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2094	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2095
2096	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2097	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2098
2099	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2100	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2101	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2102
2103	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2104}
2105
2106static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2107{
2108	u32 val;
2109
2110	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2111		return;
2112
2113	tp->setlpicnt = 0;
2114
2115	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2116	    current_link_up == 1 &&
2117	    tp->link_config.active_duplex == DUPLEX_FULL &&
2118	    (tp->link_config.active_speed == SPEED_100 ||
2119	     tp->link_config.active_speed == SPEED_1000)) {
2120		u32 eeectl;
2121
2122		if (tp->link_config.active_speed == SPEED_1000)
2123			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2124		else
2125			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2126
2127		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2128
2129		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2130				  TG3_CL45_D7_EEERES_STAT, &val);
2131
2132		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2133		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2134			tp->setlpicnt = 2;
2135	}
2136
2137	if (!tp->setlpicnt) {
2138		if (current_link_up == 1 &&
2139		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2140			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2141			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2142		}
2143
2144		val = tr32(TG3_CPMU_EEE_MODE);
2145		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2146	}
2147}
2148
2149static void tg3_phy_eee_enable(struct tg3 *tp)
2150{
2151	u32 val;
2152
2153	if (tp->link_config.active_speed == SPEED_1000 &&
2154	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2155	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2156	     tg3_flag(tp, 57765_CLASS)) &&
2157	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158		val = MII_TG3_DSP_TAP26_ALNOKO |
2159		      MII_TG3_DSP_TAP26_RMRXSTO;
2160		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2161		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2162	}
2163
2164	val = tr32(TG3_CPMU_EEE_MODE);
2165	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2166}
2167
2168static int tg3_wait_macro_done(struct tg3 *tp)
2169{
2170	int limit = 100;
2171
2172	while (limit--) {
2173		u32 tmp32;
2174
2175		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2176			if ((tmp32 & 0x1000) == 0)
2177				break;
2178		}
2179	}
2180	if (limit < 0)
2181		return -EBUSY;
2182
2183	return 0;
2184}
2185
2186static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2187{
2188	static const u32 test_pat[4][6] = {
2189	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2190	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2191	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2192	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2193	};
2194	int chan;
2195
2196	for (chan = 0; chan < 4; chan++) {
2197		int i;
2198
2199		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2200			     (chan * 0x2000) | 0x0200);
2201		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2202
2203		for (i = 0; i < 6; i++)
2204			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2205				     test_pat[chan][i]);
2206
2207		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2208		if (tg3_wait_macro_done(tp)) {
2209			*resetp = 1;
2210			return -EBUSY;
2211		}
2212
2213		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2214			     (chan * 0x2000) | 0x0200);
2215		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2216		if (tg3_wait_macro_done(tp)) {
2217			*resetp = 1;
2218			return -EBUSY;
2219		}
2220
2221		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2222		if (tg3_wait_macro_done(tp)) {
2223			*resetp = 1;
2224			return -EBUSY;
2225		}
2226
2227		for (i = 0; i < 6; i += 2) {
2228			u32 low, high;
2229
2230			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2231			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2232			    tg3_wait_macro_done(tp)) {
2233				*resetp = 1;
2234				return -EBUSY;
2235			}
2236			low &= 0x7fff;
2237			high &= 0x000f;
2238			if (low != test_pat[chan][i] ||
2239			    high != test_pat[chan][i+1]) {
2240				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2241				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2242				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2243
2244				return -EBUSY;
2245			}
2246		}
2247	}
2248
2249	return 0;
2250}
2251
2252static int tg3_phy_reset_chanpat(struct tg3 *tp)
2253{
2254	int chan;
2255
2256	for (chan = 0; chan < 4; chan++) {
2257		int i;
2258
2259		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2260			     (chan * 0x2000) | 0x0200);
2261		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2262		for (i = 0; i < 6; i++)
2263			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2264		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2265		if (tg3_wait_macro_done(tp))
2266			return -EBUSY;
2267	}
2268
2269	return 0;
2270}
2271
2272static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2273{
2274	u32 reg32, phy9_orig;
2275	int retries, do_phy_reset, err;
2276
2277	retries = 10;
2278	do_phy_reset = 1;
2279	do {
2280		if (do_phy_reset) {
2281			err = tg3_bmcr_reset(tp);
2282			if (err)
2283				return err;
2284			do_phy_reset = 0;
2285		}
2286
2287		/* Disable transmitter and interrupt.  */
2288		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2289			continue;
2290
2291		reg32 |= 0x3000;
2292		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2293
2294		/* Set full-duplex, 1000 mbps.  */
2295		tg3_writephy(tp, MII_BMCR,
2296			     BMCR_FULLDPLX | BMCR_SPEED1000);
2297
2298		/* Set to master mode.  */
2299		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2300			continue;
2301
2302		tg3_writephy(tp, MII_CTRL1000,
2303			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2304
2305		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2306		if (err)
2307			return err;
2308
2309		/* Block the PHY control access.  */
2310		tg3_phydsp_write(tp, 0x8005, 0x0800);
2311
2312		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2313		if (!err)
2314			break;
2315	} while (--retries);
2316
2317	err = tg3_phy_reset_chanpat(tp);
2318	if (err)
2319		return err;
2320
2321	tg3_phydsp_write(tp, 0x8005, 0x0000);
2322
2323	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2324	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2325
2326	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2327
2328	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2329
2330	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2331		reg32 &= ~0x3000;
2332		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2333	} else if (!err)
2334		err = -EBUSY;
2335
2336	return err;
2337}
2338
2339/* This will reset the tigon3 PHY if there is no valid
2340 * link unless the FORCE argument is non-zero.
2341 */
2342static int tg3_phy_reset(struct tg3 *tp)
2343{
2344	u32 val, cpmuctrl;
2345	int err;
2346
2347	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2348		val = tr32(GRC_MISC_CFG);
2349		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2350		udelay(40);
2351	}
2352	err  = tg3_readphy(tp, MII_BMSR, &val);
2353	err |= tg3_readphy(tp, MII_BMSR, &val);
2354	if (err != 0)
2355		return -EBUSY;
2356
2357	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2358		netif_carrier_off(tp->dev);
2359		tg3_link_report(tp);
2360	}
2361
2362	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2363	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2364	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2365		err = tg3_phy_reset_5703_4_5(tp);
2366		if (err)
2367			return err;
2368		goto out;
2369	}
2370
2371	cpmuctrl = 0;
2372	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2373	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2374		cpmuctrl = tr32(TG3_CPMU_CTRL);
2375		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2376			tw32(TG3_CPMU_CTRL,
2377			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2378	}
2379
2380	err = tg3_bmcr_reset(tp);
2381	if (err)
2382		return err;
2383
2384	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2385		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2386		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2387
2388		tw32(TG3_CPMU_CTRL, cpmuctrl);
2389	}
2390
2391	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2392	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2393		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2394		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2395		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2396			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2397			udelay(40);
2398			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2399		}
2400	}
2401
2402	if (tg3_flag(tp, 5717_PLUS) &&
2403	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2404		return 0;
2405
2406	tg3_phy_apply_otp(tp);
2407
2408	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2409		tg3_phy_toggle_apd(tp, true);
2410	else
2411		tg3_phy_toggle_apd(tp, false);
2412
2413out:
2414	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2415	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2416		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2417		tg3_phydsp_write(tp, 0x000a, 0x0323);
2418		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2419	}
2420
2421	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2422		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2423		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2424	}
2425
2426	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2427		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2428			tg3_phydsp_write(tp, 0x000a, 0x310b);
2429			tg3_phydsp_write(tp, 0x201f, 0x9506);
2430			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2431			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2432		}
2433	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2434		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2436			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2437				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2438				tg3_writephy(tp, MII_TG3_TEST1,
2439					     MII_TG3_TEST1_TRIM_EN | 0x4);
2440			} else
2441				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2442
2443			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2444		}
2445	}
2446
2447	/* Set Extended packet length bit (bit 14) on all chips that */
2448	/* support jumbo frames */
2449	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2450		/* Cannot do read-modify-write on 5401 */
2451		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2452	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2453		/* Set bit 14 with read-modify-write to preserve other bits */
2454		err = tg3_phy_auxctl_read(tp,
2455					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2456		if (!err)
2457			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2458					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2459	}
2460
2461	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2462	 * jumbo frames transmission.
2463	 */
2464	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2465		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2466			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2467				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2468	}
2469
2470	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2471		/* adjust output voltage */
2472		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2473	}
2474
2475	tg3_phy_toggle_automdix(tp, 1);
2476	tg3_phy_set_wirespeed(tp);
2477	return 0;
2478}
2479
2480#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2481#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2482#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2483					  TG3_GPIO_MSG_NEED_VAUX)
2484#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2485	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2486	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2487	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2488	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2489
2490#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2491	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2492	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2493	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2494	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2495
2496static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2497{
2498	u32 status, shift;
2499
2500	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2501	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2502		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2503	else
2504		status = tr32(TG3_CPMU_DRV_STATUS);
2505
2506	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2507	status &= ~(TG3_GPIO_MSG_MASK << shift);
2508	status |= (newstat << shift);
2509
2510	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2511	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2512		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2513	else
2514		tw32(TG3_CPMU_DRV_STATUS, status);
2515
2516	return status >> TG3_APE_GPIO_MSG_SHIFT;
2517}
2518
2519static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2520{
2521	if (!tg3_flag(tp, IS_NIC))
2522		return 0;
2523
2524	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2525	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2526	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2527		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2528			return -EIO;
2529
2530		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2531
2532		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2534
2535		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2536	} else {
2537		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2539	}
2540
2541	return 0;
2542}
2543
2544static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2545{
2546	u32 grc_local_ctrl;
2547
2548	if (!tg3_flag(tp, IS_NIC) ||
2549	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2550	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2551		return;
2552
2553	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2554
2555	tw32_wait_f(GRC_LOCAL_CTRL,
2556		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2557		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2558
2559	tw32_wait_f(GRC_LOCAL_CTRL,
2560		    grc_local_ctrl,
2561		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2562
2563	tw32_wait_f(GRC_LOCAL_CTRL,
2564		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2565		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2566}
2567
2568static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2569{
2570	if (!tg3_flag(tp, IS_NIC))
2571		return;
2572
2573	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2574	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2575		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2576			    (GRC_LCLCTRL_GPIO_OE0 |
2577			     GRC_LCLCTRL_GPIO_OE1 |
2578			     GRC_LCLCTRL_GPIO_OE2 |
2579			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2580			     GRC_LCLCTRL_GPIO_OUTPUT1),
2581			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2582	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2583		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2584		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2585		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2586				     GRC_LCLCTRL_GPIO_OE1 |
2587				     GRC_LCLCTRL_GPIO_OE2 |
2588				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2589				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2590				     tp->grc_local_ctrl;
2591		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2593
2594		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2595		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2597
2598		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2599		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2600			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2601	} else {
2602		u32 no_gpio2;
2603		u32 grc_local_ctrl = 0;
2604
2605		/* Workaround to prevent overdrawing Amps. */
2606		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2607			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2608			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2609				    grc_local_ctrl,
2610				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2611		}
2612
2613		/* On 5753 and variants, GPIO2 cannot be used. */
2614		no_gpio2 = tp->nic_sram_data_cfg &
2615			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2616
2617		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2618				  GRC_LCLCTRL_GPIO_OE1 |
2619				  GRC_LCLCTRL_GPIO_OE2 |
2620				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2621				  GRC_LCLCTRL_GPIO_OUTPUT2;
2622		if (no_gpio2) {
2623			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2624					    GRC_LCLCTRL_GPIO_OUTPUT2);
2625		}
2626		tw32_wait_f(GRC_LOCAL_CTRL,
2627			    tp->grc_local_ctrl | grc_local_ctrl,
2628			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2629
2630		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2631
2632		tw32_wait_f(GRC_LOCAL_CTRL,
2633			    tp->grc_local_ctrl | grc_local_ctrl,
2634			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2635
2636		if (!no_gpio2) {
2637			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2638			tw32_wait_f(GRC_LOCAL_CTRL,
2639				    tp->grc_local_ctrl | grc_local_ctrl,
2640				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2641		}
2642	}
2643}
2644
2645static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2646{
2647	u32 msg = 0;
2648
2649	/* Serialize power state transitions */
2650	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2651		return;
2652
2653	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2654		msg = TG3_GPIO_MSG_NEED_VAUX;
2655
2656	msg = tg3_set_function_status(tp, msg);
2657
2658	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2659		goto done;
2660
2661	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2662		tg3_pwrsrc_switch_to_vaux(tp);
2663	else
2664		tg3_pwrsrc_die_with_vmain(tp);
2665
2666done:
2667	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2668}
2669
2670static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2671{
2672	bool need_vaux = false;
2673
2674	/* The GPIOs do something completely different on 57765. */
2675	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2676		return;
2677
2678	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2680	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2681		tg3_frob_aux_power_5717(tp, include_wol ?
2682					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2683		return;
2684	}
2685
2686	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2687		struct net_device *dev_peer;
2688
2689		dev_peer = pci_get_drvdata(tp->pdev_peer);
2690
2691		/* remove_one() may have been run on the peer. */
2692		if (dev_peer) {
2693			struct tg3 *tp_peer = netdev_priv(dev_peer);
2694
2695			if (tg3_flag(tp_peer, INIT_COMPLETE))
2696				return;
2697
2698			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2699			    tg3_flag(tp_peer, ENABLE_ASF))
2700				need_vaux = true;
2701		}
2702	}
2703
2704	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2705	    tg3_flag(tp, ENABLE_ASF))
2706		need_vaux = true;
2707
2708	if (need_vaux)
2709		tg3_pwrsrc_switch_to_vaux(tp);
2710	else
2711		tg3_pwrsrc_die_with_vmain(tp);
2712}
2713
2714static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2715{
2716	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2717		return 1;
2718	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2719		if (speed != SPEED_10)
2720			return 1;
2721	} else if (speed == SPEED_10)
2722		return 1;
2723
2724	return 0;
2725}
2726
2727static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2728{
2729	u32 val;
2730
2731	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2732		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2733			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2734			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2735
2736			sg_dig_ctrl |=
2737				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2738			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2739			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2740		}
2741		return;
2742	}
2743
2744	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2745		tg3_bmcr_reset(tp);
2746		val = tr32(GRC_MISC_CFG);
2747		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2748		udelay(40);
2749		return;
2750	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2751		u32 phytest;
2752		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2753			u32 phy;
2754
2755			tg3_writephy(tp, MII_ADVERTISE, 0);
2756			tg3_writephy(tp, MII_BMCR,
2757				     BMCR_ANENABLE | BMCR_ANRESTART);
2758
2759			tg3_writephy(tp, MII_TG3_FET_TEST,
2760				     phytest | MII_TG3_FET_SHADOW_EN);
2761			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2762				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2763				tg3_writephy(tp,
2764					     MII_TG3_FET_SHDW_AUXMODE4,
2765					     phy);
2766			}
2767			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2768		}
2769		return;
2770	} else if (do_low_power) {
2771		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2772			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2773
2774		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2775		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2776		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2777		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2778	}
2779
2780	/* The PHY should not be powered down on some chips because
2781	 * of bugs.
2782	 */
2783	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2784	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2785	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2786	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2787	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2788	     !tp->pci_fn))
2789		return;
2790
2791	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2792	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2793		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2794		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2795		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2796		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2797	}
2798
2799	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2800}
2801
2802/* tp->lock is held. */
2803static int tg3_nvram_lock(struct tg3 *tp)
2804{
2805	if (tg3_flag(tp, NVRAM)) {
2806		int i;
2807
2808		if (tp->nvram_lock_cnt == 0) {
2809			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2810			for (i = 0; i < 8000; i++) {
2811				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2812					break;
2813				udelay(20);
2814			}
2815			if (i == 8000) {
2816				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2817				return -ENODEV;
2818			}
2819		}
2820		tp->nvram_lock_cnt++;
2821	}
2822	return 0;
2823}
2824
2825/* tp->lock is held. */
2826static void tg3_nvram_unlock(struct tg3 *tp)
2827{
2828	if (tg3_flag(tp, NVRAM)) {
2829		if (tp->nvram_lock_cnt > 0)
2830			tp->nvram_lock_cnt--;
2831		if (tp->nvram_lock_cnt == 0)
2832			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2833	}
2834}
2835
2836/* tp->lock is held. */
2837static void tg3_enable_nvram_access(struct tg3 *tp)
2838{
2839	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2840		u32 nvaccess = tr32(NVRAM_ACCESS);
2841
2842		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2843	}
2844}
2845
2846/* tp->lock is held. */
2847static void tg3_disable_nvram_access(struct tg3 *tp)
2848{
2849	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2850		u32 nvaccess = tr32(NVRAM_ACCESS);
2851
2852		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2853	}
2854}
2855
2856static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2857					u32 offset, u32 *val)
2858{
2859	u32 tmp;
2860	int i;
2861
2862	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2863		return -EINVAL;
2864
2865	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2866					EEPROM_ADDR_DEVID_MASK |
2867					EEPROM_ADDR_READ);
2868	tw32(GRC_EEPROM_ADDR,
2869	     tmp |
2870	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2871	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2872	      EEPROM_ADDR_ADDR_MASK) |
2873	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2874
2875	for (i = 0; i < 1000; i++) {
2876		tmp = tr32(GRC_EEPROM_ADDR);
2877
2878		if (tmp & EEPROM_ADDR_COMPLETE)
2879			break;
2880		msleep(1);
2881	}
2882	if (!(tmp & EEPROM_ADDR_COMPLETE))
2883		return -EBUSY;
2884
2885	tmp = tr32(GRC_EEPROM_DATA);
2886
2887	/*
2888	 * The data will always be opposite the native endian
2889	 * format.  Perform a blind byteswap to compensate.
2890	 */
2891	*val = swab32(tmp);
2892
2893	return 0;
2894}
2895
2896#define NVRAM_CMD_TIMEOUT 10000
2897
2898static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2899{
2900	int i;
2901
2902	tw32(NVRAM_CMD, nvram_cmd);
2903	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2904		udelay(10);
2905		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2906			udelay(10);
2907			break;
2908		}
2909	}
2910
2911	if (i == NVRAM_CMD_TIMEOUT)
2912		return -EBUSY;
2913
2914	return 0;
2915}
2916
2917static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2918{
2919	if (tg3_flag(tp, NVRAM) &&
2920	    tg3_flag(tp, NVRAM_BUFFERED) &&
2921	    tg3_flag(tp, FLASH) &&
2922	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2923	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2924
2925		addr = ((addr / tp->nvram_pagesize) <<
2926			ATMEL_AT45DB0X1B_PAGE_POS) +
2927		       (addr % tp->nvram_pagesize);
2928
2929	return addr;
2930}
2931
2932static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2933{
2934	if (tg3_flag(tp, NVRAM) &&
2935	    tg3_flag(tp, NVRAM_BUFFERED) &&
2936	    tg3_flag(tp, FLASH) &&
2937	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2938	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2939
2940		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2941			tp->nvram_pagesize) +
2942		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2943
2944	return addr;
2945}
2946
2947/* NOTE: Data read in from NVRAM is byteswapped according to
2948 * the byteswapping settings for all other register accesses.
2949 * tg3 devices are BE devices, so on a BE machine, the data
2950 * returned will be exactly as it is seen in NVRAM.  On a LE
2951 * machine, the 32-bit value will be byteswapped.
2952 */
2953static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2954{
2955	int ret;
2956
2957	if (!tg3_flag(tp, NVRAM))
2958		return tg3_nvram_read_using_eeprom(tp, offset, val);
2959
2960	offset = tg3_nvram_phys_addr(tp, offset);
2961
2962	if (offset > NVRAM_ADDR_MSK)
2963		return -EINVAL;
2964
2965	ret = tg3_nvram_lock(tp);
2966	if (ret)
2967		return ret;
2968
2969	tg3_enable_nvram_access(tp);
2970
2971	tw32(NVRAM_ADDR, offset);
2972	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2973		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2974
2975	if (ret == 0)
2976		*val = tr32(NVRAM_RDDATA);
2977
2978	tg3_disable_nvram_access(tp);
2979
2980	tg3_nvram_unlock(tp);
2981
2982	return ret;
2983}
2984
2985/* Ensures NVRAM data is in bytestream format. */
2986static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2987{
2988	u32 v;
2989	int res = tg3_nvram_read(tp, offset, &v);
2990	if (!res)
2991		*val = cpu_to_be32(v);
2992	return res;
2993}
2994
2995static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2996				    u32 offset, u32 len, u8 *buf)
2997{
2998	int i, j, rc = 0;
2999	u32 val;
3000
3001	for (i = 0; i < len; i += 4) {
3002		u32 addr;
3003		__be32 data;
3004
3005		addr = offset + i;
3006
3007		memcpy(&data, buf + i, 4);
3008
3009		/*
3010		 * The SEEPROM interface expects the data to always be opposite
3011		 * the native endian format.  We accomplish this by reversing
3012		 * all the operations that would have been performed on the
3013		 * data from a call to tg3_nvram_read_be32().
3014		 */
3015		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3016
3017		val = tr32(GRC_EEPROM_ADDR);
3018		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3019
3020		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3021			EEPROM_ADDR_READ);
3022		tw32(GRC_EEPROM_ADDR, val |
3023			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3024			(addr & EEPROM_ADDR_ADDR_MASK) |
3025			EEPROM_ADDR_START |
3026			EEPROM_ADDR_WRITE);
3027
3028		for (j = 0; j < 1000; j++) {
3029			val = tr32(GRC_EEPROM_ADDR);
3030
3031			if (val & EEPROM_ADDR_COMPLETE)
3032				break;
3033			msleep(1);
3034		}
3035		if (!(val & EEPROM_ADDR_COMPLETE)) {
3036			rc = -EBUSY;
3037			break;
3038		}
3039	}
3040
3041	return rc;
3042}
3043
3044/* offset and length are dword aligned */
3045static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3046		u8 *buf)
3047{
3048	int ret = 0;
3049	u32 pagesize = tp->nvram_pagesize;
3050	u32 pagemask = pagesize - 1;
3051	u32 nvram_cmd;
3052	u8 *tmp;
3053
3054	tmp = kmalloc(pagesize, GFP_KERNEL);
3055	if (tmp == NULL)
3056		return -ENOMEM;
3057
3058	while (len) {
3059		int j;
3060		u32 phy_addr, page_off, size;
3061
3062		phy_addr = offset & ~pagemask;
3063
3064		for (j = 0; j < pagesize; j += 4) {
3065			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3066						  (__be32 *) (tmp + j));
3067			if (ret)
3068				break;
3069		}
3070		if (ret)
3071			break;
3072
3073		page_off = offset & pagemask;
3074		size = pagesize;
3075		if (len < size)
3076			size = len;
3077
3078		len -= size;
3079
3080		memcpy(tmp + page_off, buf, size);
3081
3082		offset = offset + (pagesize - page_off);
3083
3084		tg3_enable_nvram_access(tp);
3085
3086		/*
3087		 * Before we can erase the flash page, we need
3088		 * to issue a special "write enable" command.
3089		 */
3090		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3091
3092		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3093			break;
3094
3095		/* Erase the target page */
3096		tw32(NVRAM_ADDR, phy_addr);
3097
3098		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3099			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3100
3101		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3102			break;
3103
3104		/* Issue another write enable to start the write. */
3105		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3106
3107		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3108			break;
3109
3110		for (j = 0; j < pagesize; j += 4) {
3111			__be32 data;
3112
3113			data = *((__be32 *) (tmp + j));
3114
3115			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3116
3117			tw32(NVRAM_ADDR, phy_addr + j);
3118
3119			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3120				NVRAM_CMD_WR;
3121
3122			if (j == 0)
3123				nvram_cmd |= NVRAM_CMD_FIRST;
3124			else if (j == (pagesize - 4))
3125				nvram_cmd |= NVRAM_CMD_LAST;
3126
3127			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3128			if (ret)
3129				break;
3130		}
3131		if (ret)
3132			break;
3133	}
3134
3135	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3136	tg3_nvram_exec_cmd(tp, nvram_cmd);
3137
3138	kfree(tmp);
3139
3140	return ret;
3141}
3142
3143/* offset and length are dword aligned */
3144static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3145		u8 *buf)
3146{
3147	int i, ret = 0;
3148
3149	for (i = 0; i < len; i += 4, offset += 4) {
3150		u32 page_off, phy_addr, nvram_cmd;
3151		__be32 data;
3152
3153		memcpy(&data, buf + i, 4);
3154		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3155
3156		page_off = offset % tp->nvram_pagesize;
3157
3158		phy_addr = tg3_nvram_phys_addr(tp, offset);
3159
3160		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3161
3162		if (page_off == 0 || i == 0)
3163			nvram_cmd |= NVRAM_CMD_FIRST;
3164		if (page_off == (tp->nvram_pagesize - 4))
3165			nvram_cmd |= NVRAM_CMD_LAST;
3166
3167		if (i == (len - 4))
3168			nvram_cmd |= NVRAM_CMD_LAST;
3169
3170		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3171		    !tg3_flag(tp, FLASH) ||
3172		    !tg3_flag(tp, 57765_PLUS))
3173			tw32(NVRAM_ADDR, phy_addr);
3174
3175		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3176		    !tg3_flag(tp, 5755_PLUS) &&
3177		    (tp->nvram_jedecnum == JEDEC_ST) &&
3178		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3179			u32 cmd;
3180
3181			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3182			ret = tg3_nvram_exec_cmd(tp, cmd);
3183			if (ret)
3184				break;
3185		}
3186		if (!tg3_flag(tp, FLASH)) {
3187			/* We always do complete word writes to eeprom. */
3188			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3189		}
3190
3191		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3192		if (ret)
3193			break;
3194	}
3195	return ret;
3196}
3197
3198/* offset and length are dword aligned */
3199static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3200{
3201	int ret;
3202
3203	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3204		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3205		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3206		udelay(40);
3207	}
3208
3209	if (!tg3_flag(tp, NVRAM)) {
3210		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3211	} else {
3212		u32 grc_mode;
3213
3214		ret = tg3_nvram_lock(tp);
3215		if (ret)
3216			return ret;
3217
3218		tg3_enable_nvram_access(tp);
3219		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3220			tw32(NVRAM_WRITE1, 0x406);
3221
3222		grc_mode = tr32(GRC_MODE);
3223		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3224
3225		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3226			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3227				buf);
3228		} else {
3229			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3230				buf);
3231		}
3232
3233		grc_mode = tr32(GRC_MODE);
3234		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3235
3236		tg3_disable_nvram_access(tp);
3237		tg3_nvram_unlock(tp);
3238	}
3239
3240	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3241		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3242		udelay(40);
3243	}
3244
3245	return ret;
3246}
3247
3248#define RX_CPU_SCRATCH_BASE	0x30000
3249#define RX_CPU_SCRATCH_SIZE	0x04000
3250#define TX_CPU_SCRATCH_BASE	0x34000
3251#define TX_CPU_SCRATCH_SIZE	0x04000
3252
3253/* tp->lock is held. */
3254static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3255{
3256	int i;
3257
3258	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3259
3260	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3261		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3262
3263		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3264		return 0;
3265	}
3266	if (offset == RX_CPU_BASE) {
3267		for (i = 0; i < 10000; i++) {
3268			tw32(offset + CPU_STATE, 0xffffffff);
3269			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3270			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3271				break;
3272		}
3273
3274		tw32(offset + CPU_STATE, 0xffffffff);
3275		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3276		udelay(10);
3277	} else {
3278		for (i = 0; i < 10000; i++) {
3279			tw32(offset + CPU_STATE, 0xffffffff);
3280			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3281			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3282				break;
3283		}
3284	}
3285
3286	if (i >= 10000) {
3287		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3288			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3289		return -ENODEV;
3290	}
3291
3292	/* Clear firmware's nvram arbitration. */
3293	if (tg3_flag(tp, NVRAM))
3294		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3295	return 0;
3296}
3297
3298struct fw_info {
3299	unsigned int fw_base;
3300	unsigned int fw_len;
3301	const __be32 *fw_data;
3302};
3303
3304/* tp->lock is held. */
3305static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3306				 u32 cpu_scratch_base, int cpu_scratch_size,
3307				 struct fw_info *info)
3308{
3309	int err, lock_err, i;
3310	void (*write_op)(struct tg3 *, u32, u32);
3311
3312	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3313		netdev_err(tp->dev,
3314			   "%s: Trying to load TX cpu firmware which is 5705\n",
3315			   __func__);
3316		return -EINVAL;
3317	}
3318
3319	if (tg3_flag(tp, 5705_PLUS))
3320		write_op = tg3_write_mem;
3321	else
3322		write_op = tg3_write_indirect_reg32;
3323
3324	/* It is possible that bootcode is still loading at this point.
3325	 * Get the nvram lock first before halting the cpu.
3326	 */
3327	lock_err = tg3_nvram_lock(tp);
3328	err = tg3_halt_cpu(tp, cpu_base);
3329	if (!lock_err)
3330		tg3_nvram_unlock(tp);
3331	if (err)
3332		goto out;
3333
3334	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3335		write_op(tp, cpu_scratch_base + i, 0);
3336	tw32(cpu_base + CPU_STATE, 0xffffffff);
3337	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3338	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3339		write_op(tp, (cpu_scratch_base +
3340			      (info->fw_base & 0xffff) +
3341			      (i * sizeof(u32))),
3342			      be32_to_cpu(info->fw_data[i]));
3343
3344	err = 0;
3345
3346out:
3347	return err;
3348}
3349
3350/* tp->lock is held. */
3351static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3352{
3353	struct fw_info info;
3354	const __be32 *fw_data;
3355	int err, i;
3356
3357	fw_data = (void *)tp->fw->data;
3358
3359	/* Firmware blob starts with version numbers, followed by
3360	   start address and length. We are setting complete length.
3361	   length = end_address_of_bss - start_address_of_text.
3362	   Remainder is the blob to be loaded contiguously
3363	   from start address. */
3364
3365	info.fw_base = be32_to_cpu(fw_data[1]);
3366	info.fw_len = tp->fw->size - 12;
3367	info.fw_data = &fw_data[3];
3368
3369	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3370				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3371				    &info);
3372	if (err)
3373		return err;
3374
3375	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3376				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3377				    &info);
3378	if (err)
3379		return err;
3380
3381	/* Now startup only the RX cpu. */
3382	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3384
3385	for (i = 0; i < 5; i++) {
3386		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3387			break;
3388		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3389		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3390		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3391		udelay(1000);
3392	}
3393	if (i >= 5) {
3394		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3395			   "should be %08x\n", __func__,
3396			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3397		return -ENODEV;
3398	}
3399	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3400	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3401
3402	return 0;
3403}
3404
3405/* tp->lock is held. */
3406static int tg3_load_tso_firmware(struct tg3 *tp)
3407{
3408	struct fw_info info;
3409	const __be32 *fw_data;
3410	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3411	int err, i;
3412
3413	if (tg3_flag(tp, HW_TSO_1) ||
3414	    tg3_flag(tp, HW_TSO_2) ||
3415	    tg3_flag(tp, HW_TSO_3))
3416		return 0;
3417
3418	fw_data = (void *)tp->fw->data;
3419
3420	/* Firmware blob starts with version numbers, followed by
3421	   start address and length. We are setting complete length.
3422	   length = end_address_of_bss - start_address_of_text.
3423	   Remainder is the blob to be loaded contiguously
3424	   from start address. */
3425
3426	info.fw_base = be32_to_cpu(fw_data[1]);
3427	cpu_scratch_size = tp->fw_len;
3428	info.fw_len = tp->fw->size - 12;
3429	info.fw_data = &fw_data[3];
3430
3431	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3432		cpu_base = RX_CPU_BASE;
3433		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3434	} else {
3435		cpu_base = TX_CPU_BASE;
3436		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3437		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3438	}
3439
3440	err = tg3_load_firmware_cpu(tp, cpu_base,
3441				    cpu_scratch_base, cpu_scratch_size,
3442				    &info);
3443	if (err)
3444		return err;
3445
3446	/* Now startup the cpu. */
3447	tw32(cpu_base + CPU_STATE, 0xffffffff);
3448	tw32_f(cpu_base + CPU_PC, info.fw_base);
3449
3450	for (i = 0; i < 5; i++) {
3451		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3452			break;
3453		tw32(cpu_base + CPU_STATE, 0xffffffff);
3454		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3455		tw32_f(cpu_base + CPU_PC, info.fw_base);
3456		udelay(1000);
3457	}
3458	if (i >= 5) {
3459		netdev_err(tp->dev,
3460			   "%s fails to set CPU PC, is %08x should be %08x\n",
3461			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3462		return -ENODEV;
3463	}
3464	tw32(cpu_base + CPU_STATE, 0xffffffff);
3465	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3466	return 0;
3467}
3468
3469
3470/* tp->lock is held. */
3471static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3472{
3473	u32 addr_high, addr_low;
3474	int i;
3475
3476	addr_high = ((tp->dev->dev_addr[0] << 8) |
3477		     tp->dev->dev_addr[1]);
3478	addr_low = ((tp->dev->dev_addr[2] << 24) |
3479		    (tp->dev->dev_addr[3] << 16) |
3480		    (tp->dev->dev_addr[4] <<  8) |
3481		    (tp->dev->dev_addr[5] <<  0));
3482	for (i = 0; i < 4; i++) {
3483		if (i == 1 && skip_mac_1)
3484			continue;
3485		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3486		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3487	}
3488
3489	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3490	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3491		for (i = 0; i < 12; i++) {
3492			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3493			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3494		}
3495	}
3496
3497	addr_high = (tp->dev->dev_addr[0] +
3498		     tp->dev->dev_addr[1] +
3499		     tp->dev->dev_addr[2] +
3500		     tp->dev->dev_addr[3] +
3501		     tp->dev->dev_addr[4] +
3502		     tp->dev->dev_addr[5]) &
3503		TX_BACKOFF_SEED_MASK;
3504	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3505}
3506
3507static void tg3_enable_register_access(struct tg3 *tp)
3508{
3509	/*
3510	 * Make sure register accesses (indirect or otherwise) will function
3511	 * correctly.
3512	 */
3513	pci_write_config_dword(tp->pdev,
3514			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3515}
3516
3517static int tg3_power_up(struct tg3 *tp)
3518{
3519	int err;
3520
3521	tg3_enable_register_access(tp);
3522
3523	err = pci_set_power_state(tp->pdev, PCI_D0);
3524	if (!err) {
3525		/* Switch out of Vaux if it is a NIC */
3526		tg3_pwrsrc_switch_to_vmain(tp);
3527	} else {
3528		netdev_err(tp->dev, "Transition to D0 failed\n");
3529	}
3530
3531	return err;
3532}
3533
3534static int tg3_setup_phy(struct tg3 *, int);
3535
3536static int tg3_power_down_prepare(struct tg3 *tp)
3537{
3538	u32 misc_host_ctrl;
3539	bool device_should_wake, do_low_power;
3540
3541	tg3_enable_register_access(tp);
3542
3543	/* Restore the CLKREQ setting. */
3544	if (tg3_flag(tp, CLKREQ_BUG)) {
3545		u16 lnkctl;
3546
3547		pci_read_config_word(tp->pdev,
3548				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3549				     &lnkctl);
3550		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3551		pci_write_config_word(tp->pdev,
3552				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3553				      lnkctl);
3554	}
3555
3556	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3557	tw32(TG3PCI_MISC_HOST_CTRL,
3558	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3559
3560	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3561			     tg3_flag(tp, WOL_ENABLE);
3562
3563	if (tg3_flag(tp, USE_PHYLIB)) {
3564		do_low_power = false;
3565		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3566		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3567			struct phy_device *phydev;
3568			u32 phyid, advertising;
3569
3570			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3571
3572			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3573
3574			tp->link_config.speed = phydev->speed;
3575			tp->link_config.duplex = phydev->duplex;
3576			tp->link_config.autoneg = phydev->autoneg;
3577			tp->link_config.advertising = phydev->advertising;
3578
3579			advertising = ADVERTISED_TP |
3580				      ADVERTISED_Pause |
3581				      ADVERTISED_Autoneg |
3582				      ADVERTISED_10baseT_Half;
3583
3584			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3585				if (tg3_flag(tp, WOL_SPEED_100MB))
3586					advertising |=
3587						ADVERTISED_100baseT_Half |
3588						ADVERTISED_100baseT_Full |
3589						ADVERTISED_10baseT_Full;
3590				else
3591					advertising |= ADVERTISED_10baseT_Full;
3592			}
3593
3594			phydev->advertising = advertising;
3595
3596			phy_start_aneg(phydev);
3597
3598			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3599			if (phyid != PHY_ID_BCMAC131) {
3600				phyid &= PHY_BCM_OUI_MASK;
3601				if (phyid == PHY_BCM_OUI_1 ||
3602				    phyid == PHY_BCM_OUI_2 ||
3603				    phyid == PHY_BCM_OUI_3)
3604					do_low_power = true;
3605			}
3606		}
3607	} else {
3608		do_low_power = true;
3609
3610		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3611			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3612
3613		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3614			tg3_setup_phy(tp, 0);
3615	}
3616
3617	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3618		u32 val;
3619
3620		val = tr32(GRC_VCPU_EXT_CTRL);
3621		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3622	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3623		int i;
3624		u32 val;
3625
3626		for (i = 0; i < 200; i++) {
3627			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3628			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3629				break;
3630			msleep(1);
3631		}
3632	}
3633	if (tg3_flag(tp, WOL_CAP))
3634		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3635						     WOL_DRV_STATE_SHUTDOWN |
3636						     WOL_DRV_WOL |
3637						     WOL_SET_MAGIC_PKT);
3638
3639	if (device_should_wake) {
3640		u32 mac_mode;
3641
3642		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3643			if (do_low_power &&
3644			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3645				tg3_phy_auxctl_write(tp,
3646					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3647					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3648					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3649					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3650				udelay(40);
3651			}
3652
3653			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3654				mac_mode = MAC_MODE_PORT_MODE_GMII;
3655			else
3656				mac_mode = MAC_MODE_PORT_MODE_MII;
3657
3658			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3659			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3660			    ASIC_REV_5700) {
3661				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3662					     SPEED_100 : SPEED_10;
3663				if (tg3_5700_link_polarity(tp, speed))
3664					mac_mode |= MAC_MODE_LINK_POLARITY;
3665				else
3666					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3667			}
3668		} else {
3669			mac_mode = MAC_MODE_PORT_MODE_TBI;
3670		}
3671
3672		if (!tg3_flag(tp, 5750_PLUS))
3673			tw32(MAC_LED_CTRL, tp->led_ctrl);
3674
3675		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3676		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3677		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3678			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3679
3680		if (tg3_flag(tp, ENABLE_APE))
3681			mac_mode |= MAC_MODE_APE_TX_EN |
3682				    MAC_MODE_APE_RX_EN |
3683				    MAC_MODE_TDE_ENABLE;
3684
3685		tw32_f(MAC_MODE, mac_mode);
3686		udelay(100);
3687
3688		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3689		udelay(10);
3690	}
3691
3692	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3693	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3694	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3695		u32 base_val;
3696
3697		base_val = tp->pci_clock_ctrl;
3698		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3699			     CLOCK_CTRL_TXCLK_DISABLE);
3700
3701		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3702			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3703	} else if (tg3_flag(tp, 5780_CLASS) ||
3704		   tg3_flag(tp, CPMU_PRESENT) ||
3705		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3706		/* do nothing */
3707	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3708		u32 newbits1, newbits2;
3709
3710		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3711		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3712			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3713				    CLOCK_CTRL_TXCLK_DISABLE |
3714				    CLOCK_CTRL_ALTCLK);
3715			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3716		} else if (tg3_flag(tp, 5705_PLUS)) {
3717			newbits1 = CLOCK_CTRL_625_CORE;
3718			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3719		} else {
3720			newbits1 = CLOCK_CTRL_ALTCLK;
3721			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3722		}
3723
3724		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3725			    40);
3726
3727		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3728			    40);
3729
3730		if (!tg3_flag(tp, 5705_PLUS)) {
3731			u32 newbits3;
3732
3733			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3734			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3735				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3736					    CLOCK_CTRL_TXCLK_DISABLE |
3737					    CLOCK_CTRL_44MHZ_CORE);
3738			} else {
3739				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3740			}
3741
3742			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3743				    tp->pci_clock_ctrl | newbits3, 40);
3744		}
3745	}
3746
3747	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3748		tg3_power_down_phy(tp, do_low_power);
3749
3750	tg3_frob_aux_power(tp, true);
3751
3752	/* Workaround for unstable PLL clock */
3753	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3754	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3755		u32 val = tr32(0x7d00);
3756
3757		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3758		tw32(0x7d00, val);
3759		if (!tg3_flag(tp, ENABLE_ASF)) {
3760			int err;
3761
3762			err = tg3_nvram_lock(tp);
3763			tg3_halt_cpu(tp, RX_CPU_BASE);
3764			if (!err)
3765				tg3_nvram_unlock(tp);
3766		}
3767	}
3768
3769	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3770
3771	return 0;
3772}
3773
3774static void tg3_power_down(struct tg3 *tp)
3775{
3776	tg3_power_down_prepare(tp);
3777
3778	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3779	pci_set_power_state(tp->pdev, PCI_D3hot);
3780}
3781
3782static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3783{
3784	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3785	case MII_TG3_AUX_STAT_10HALF:
3786		*speed = SPEED_10;
3787		*duplex = DUPLEX_HALF;
3788		break;
3789
3790	case MII_TG3_AUX_STAT_10FULL:
3791		*speed = SPEED_10;
3792		*duplex = DUPLEX_FULL;
3793		break;
3794
3795	case MII_TG3_AUX_STAT_100HALF:
3796		*speed = SPEED_100;
3797		*duplex = DUPLEX_HALF;
3798		break;
3799
3800	case MII_TG3_AUX_STAT_100FULL:
3801		*speed = SPEED_100;
3802		*duplex = DUPLEX_FULL;
3803		break;
3804
3805	case MII_TG3_AUX_STAT_1000HALF:
3806		*speed = SPEED_1000;
3807		*duplex = DUPLEX_HALF;
3808		break;
3809
3810	case MII_TG3_AUX_STAT_1000FULL:
3811		*speed = SPEED_1000;
3812		*duplex = DUPLEX_FULL;
3813		break;
3814
3815	default:
3816		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3817			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3818				 SPEED_10;
3819			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3820				  DUPLEX_HALF;
3821			break;
3822		}
3823		*speed = SPEED_UNKNOWN;
3824		*duplex = DUPLEX_UNKNOWN;
3825		break;
3826	}
3827}
3828
3829static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3830{
3831	int err = 0;
3832	u32 val, new_adv;
3833
3834	new_adv = ADVERTISE_CSMA;
3835	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3836	new_adv |= mii_advertise_flowctrl(flowctrl);
3837
3838	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3839	if (err)
3840		goto done;
3841
3842	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3843		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3844
3845		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3846		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3847			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3848
3849		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3850		if (err)
3851			goto done;
3852	}
3853
3854	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3855		goto done;
3856
3857	tw32(TG3_CPMU_EEE_MODE,
3858	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3859
3860	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3861	if (!err) {
3862		u32 err2;
3863
3864		val = 0;
3865		/* Advertise 100-BaseTX EEE ability */
3866		if (advertise & ADVERTISED_100baseT_Full)
3867			val |= MDIO_AN_EEE_ADV_100TX;
3868		/* Advertise 1000-BaseT EEE ability */
3869		if (advertise & ADVERTISED_1000baseT_Full)
3870			val |= MDIO_AN_EEE_ADV_1000T;
3871		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3872		if (err)
3873			val = 0;
3874
3875		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3876		case ASIC_REV_5717:
3877		case ASIC_REV_57765:
3878		case ASIC_REV_57766:
3879		case ASIC_REV_5719:
3880			/* If we advertised any eee advertisements above... */
3881			if (val)
3882				val = MII_TG3_DSP_TAP26_ALNOKO |
3883				      MII_TG3_DSP_TAP26_RMRXSTO |
3884				      MII_TG3_DSP_TAP26_OPCSINPT;
3885			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3886			/* Fall through */
3887		case ASIC_REV_5720:
3888			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3889				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3890						 MII_TG3_DSP_CH34TP2_HIBW01);
3891		}
3892
3893		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3894		if (!err)
3895			err = err2;
3896	}
3897
3898done:
3899	return err;
3900}
3901
3902static void tg3_phy_copper_begin(struct tg3 *tp)
3903{
3904	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3905	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3906		u32 adv, fc;
3907
3908		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3909			adv = ADVERTISED_10baseT_Half |
3910			      ADVERTISED_10baseT_Full;
3911			if (tg3_flag(tp, WOL_SPEED_100MB))
3912				adv |= ADVERTISED_100baseT_Half |
3913				       ADVERTISED_100baseT_Full;
3914
3915			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3916		} else {
3917			adv = tp->link_config.advertising;
3918			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3919				adv &= ~(ADVERTISED_1000baseT_Half |
3920					 ADVERTISED_1000baseT_Full);
3921
3922			fc = tp->link_config.flowctrl;
3923		}
3924
3925		tg3_phy_autoneg_cfg(tp, adv, fc);
3926
3927		tg3_writephy(tp, MII_BMCR,
3928			     BMCR_ANENABLE | BMCR_ANRESTART);
3929	} else {
3930		int i;
3931		u32 bmcr, orig_bmcr;
3932
3933		tp->link_config.active_speed = tp->link_config.speed;
3934		tp->link_config.active_duplex = tp->link_config.duplex;
3935
3936		bmcr = 0;
3937		switch (tp->link_config.speed) {
3938		default:
3939		case SPEED_10:
3940			break;
3941
3942		case SPEED_100:
3943			bmcr |= BMCR_SPEED100;
3944			break;
3945
3946		case SPEED_1000:
3947			bmcr |= BMCR_SPEED1000;
3948			break;
3949		}
3950
3951		if (tp->link_config.duplex == DUPLEX_FULL)
3952			bmcr |= BMCR_FULLDPLX;
3953
3954		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3955		    (bmcr != orig_bmcr)) {
3956			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3957			for (i = 0; i < 1500; i++) {
3958				u32 tmp;
3959
3960				udelay(10);
3961				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3962				    tg3_readphy(tp, MII_BMSR, &tmp))
3963					continue;
3964				if (!(tmp & BMSR_LSTATUS)) {
3965					udelay(40);
3966					break;
3967				}
3968			}
3969			tg3_writephy(tp, MII_BMCR, bmcr);
3970			udelay(40);
3971		}
3972	}
3973}
3974
3975static int tg3_init_5401phy_dsp(struct tg3 *tp)
3976{
3977	int err;
3978
3979	/* Turn off tap power management. */
3980	/* Set Extended packet length bit */
3981	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3982
3983	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3984	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3985	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3986	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3987	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3988
3989	udelay(40);
3990
3991	return err;
3992}
3993
3994static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3995{
3996	u32 advmsk, tgtadv, advertising;
3997
3998	advertising = tp->link_config.advertising;
3999	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4000
4001	advmsk = ADVERTISE_ALL;
4002	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4003		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4004		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4005	}
4006
4007	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4008		return false;
4009
4010	if ((*lcladv & advmsk) != tgtadv)
4011		return false;
4012
4013	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4014		u32 tg3_ctrl;
4015
4016		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4017
4018		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4019			return false;
4020
4021		if (tgtadv &&
4022		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4023		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4024			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4025			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4026				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4027		} else {
4028			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4029		}
4030
4031		if (tg3_ctrl != tgtadv)
4032			return false;
4033	}
4034
4035	return true;
4036}
4037
4038static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4039{
4040	u32 lpeth = 0;
4041
4042	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4043		u32 val;
4044
4045		if (tg3_readphy(tp, MII_STAT1000, &val))
4046			return false;
4047
4048		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4049	}
4050
4051	if (tg3_readphy(tp, MII_LPA, rmtadv))
4052		return false;
4053
4054	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4055	tp->link_config.rmt_adv = lpeth;
4056
4057	return true;
4058}
4059
4060static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4061{
4062	int current_link_up;
4063	u32 bmsr, val;
4064	u32 lcl_adv, rmt_adv;
4065	u16 current_speed;
4066	u8 current_duplex;
4067	int i, err;
4068
4069	tw32(MAC_EVENT, 0);
4070
4071	tw32_f(MAC_STATUS,
4072	     (MAC_STATUS_SYNC_CHANGED |
4073	      MAC_STATUS_CFG_CHANGED |
4074	      MAC_STATUS_MI_COMPLETION |
4075	      MAC_STATUS_LNKSTATE_CHANGED));
4076	udelay(40);
4077
4078	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4079		tw32_f(MAC_MI_MODE,
4080		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4081		udelay(80);
4082	}
4083
4084	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4085
4086	/* Some third-party PHYs need to be reset on link going
4087	 * down.
4088	 */
4089	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4090	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4091	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4092	    netif_carrier_ok(tp->dev)) {
4093		tg3_readphy(tp, MII_BMSR, &bmsr);
4094		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4095		    !(bmsr & BMSR_LSTATUS))
4096			force_reset = 1;
4097	}
4098	if (force_reset)
4099		tg3_phy_reset(tp);
4100
4101	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4102		tg3_readphy(tp, MII_BMSR, &bmsr);
4103		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4104		    !tg3_flag(tp, INIT_COMPLETE))
4105			bmsr = 0;
4106
4107		if (!(bmsr & BMSR_LSTATUS)) {
4108			err = tg3_init_5401phy_dsp(tp);
4109			if (err)
4110				return err;
4111
4112			tg3_readphy(tp, MII_BMSR, &bmsr);
4113			for (i = 0; i < 1000; i++) {
4114				udelay(10);
4115				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4116				    (bmsr & BMSR_LSTATUS)) {
4117					udelay(40);
4118					break;
4119				}
4120			}
4121
4122			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4123			    TG3_PHY_REV_BCM5401_B0 &&
4124			    !(bmsr & BMSR_LSTATUS) &&
4125			    tp->link_config.active_speed == SPEED_1000) {
4126				err = tg3_phy_reset(tp);
4127				if (!err)
4128					err = tg3_init_5401phy_dsp(tp);
4129				if (err)
4130					return err;
4131			}
4132		}
4133	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4134		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4135		/* 5701 {A0,B0} CRC bug workaround */
4136		tg3_writephy(tp, 0x15, 0x0a75);
4137		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4138		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4139		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4140	}
4141
4142	/* Clear pending interrupts... */
4143	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4144	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4145
4146	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4147		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4148	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4149		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4150
4151	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4152	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4153		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4154			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4155				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4156		else
4157			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4158	}
4159
4160	current_link_up = 0;
4161	current_speed = SPEED_UNKNOWN;
4162	current_duplex = DUPLEX_UNKNOWN;
4163	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4164	tp->link_config.rmt_adv = 0;
4165
4166	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4167		err = tg3_phy_auxctl_read(tp,
4168					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4169					  &val);
4170		if (!err && !(val & (1 << 10))) {
4171			tg3_phy_auxctl_write(tp,
4172					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4173					     val | (1 << 10));
4174			goto relink;
4175		}
4176	}
4177
4178	bmsr = 0;
4179	for (i = 0; i < 100; i++) {
4180		tg3_readphy(tp, MII_BMSR, &bmsr);
4181		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4182		    (bmsr & BMSR_LSTATUS))
4183			break;
4184		udelay(40);
4185	}
4186
4187	if (bmsr & BMSR_LSTATUS) {
4188		u32 aux_stat, bmcr;
4189
4190		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4191		for (i = 0; i < 2000; i++) {
4192			udelay(10);
4193			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4194			    aux_stat)
4195				break;
4196		}
4197
4198		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4199					     &current_speed,
4200					     &current_duplex);
4201
4202		bmcr = 0;
4203		for (i = 0; i < 200; i++) {
4204			tg3_readphy(tp, MII_BMCR, &bmcr);
4205			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4206				continue;
4207			if (bmcr && bmcr != 0x7fff)
4208				break;
4209			udelay(10);
4210		}
4211
4212		lcl_adv = 0;
4213		rmt_adv = 0;
4214
4215		tp->link_config.active_speed = current_speed;
4216		tp->link_config.active_duplex = current_duplex;
4217
4218		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4219			if ((bmcr & BMCR_ANENABLE) &&
4220			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4221			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4222				current_link_up = 1;
4223		} else {
4224			if (!(bmcr & BMCR_ANENABLE) &&
4225			    tp->link_config.speed == current_speed &&
4226			    tp->link_config.duplex == current_duplex &&
4227			    tp->link_config.flowctrl ==
4228			    tp->link_config.active_flowctrl) {
4229				current_link_up = 1;
4230			}
4231		}
4232
4233		if (current_link_up == 1 &&
4234		    tp->link_config.active_duplex == DUPLEX_FULL) {
4235			u32 reg, bit;
4236
4237			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4238				reg = MII_TG3_FET_GEN_STAT;
4239				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4240			} else {
4241				reg = MII_TG3_EXT_STAT;
4242				bit = MII_TG3_EXT_STAT_MDIX;
4243			}
4244
4245			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4246				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4247
4248			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4249		}
4250	}
4251
4252relink:
4253	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4254		tg3_phy_copper_begin(tp);
4255
4256		tg3_readphy(tp, MII_BMSR, &bmsr);
4257		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4258		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4259			current_link_up = 1;
4260	}
4261
4262	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4263	if (current_link_up == 1) {
4264		if (tp->link_config.active_speed == SPEED_100 ||
4265		    tp->link_config.active_speed == SPEED_10)
4266			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4267		else
4268			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4269	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4270		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4271	else
4272		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4273
4274	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4275	if (tp->link_config.active_duplex == DUPLEX_HALF)
4276		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4277
4278	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4279		if (current_link_up == 1 &&
4280		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4281			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4282		else
4283			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4284	}
4285
4286	/* ??? Without this setting Netgear GA302T PHY does not
4287	 * ??? send/receive packets...
4288	 */
4289	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4290	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4291		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4292		tw32_f(MAC_MI_MODE, tp->mi_mode);
4293		udelay(80);
4294	}
4295
4296	tw32_f(MAC_MODE, tp->mac_mode);
4297	udelay(40);
4298
4299	tg3_phy_eee_adjust(tp, current_link_up);
4300
4301	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4302		/* Polled via timer. */
4303		tw32_f(MAC_EVENT, 0);
4304	} else {
4305		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4306	}
4307	udelay(40);
4308
4309	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4310	    current_link_up == 1 &&
4311	    tp->link_config.active_speed == SPEED_1000 &&
4312	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4313		udelay(120);
4314		tw32_f(MAC_STATUS,
4315		     (MAC_STATUS_SYNC_CHANGED |
4316		      MAC_STATUS_CFG_CHANGED));
4317		udelay(40);
4318		tg3_write_mem(tp,
4319			      NIC_SRAM_FIRMWARE_MBOX,
4320			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4321	}
4322
4323	/* Prevent send BD corruption. */
4324	if (tg3_flag(tp, CLKREQ_BUG)) {
4325		u16 oldlnkctl, newlnkctl;
4326
4327		pci_read_config_word(tp->pdev,
4328				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4329				     &oldlnkctl);
4330		if (tp->link_config.active_speed == SPEED_100 ||
4331		    tp->link_config.active_speed == SPEED_10)
4332			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4333		else
4334			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4335		if (newlnkctl != oldlnkctl)
4336			pci_write_config_word(tp->pdev,
4337					      pci_pcie_cap(tp->pdev) +
4338					      PCI_EXP_LNKCTL, newlnkctl);
4339	}
4340
4341	if (current_link_up != netif_carrier_ok(tp->dev)) {
4342		if (current_link_up)
4343			netif_carrier_on(tp->dev);
4344		else
4345			netif_carrier_off(tp->dev);
4346		tg3_link_report(tp);
4347	}
4348
4349	return 0;
4350}
4351
4352struct tg3_fiber_aneginfo {
4353	int state;
4354#define ANEG_STATE_UNKNOWN		0
4355#define ANEG_STATE_AN_ENABLE		1
4356#define ANEG_STATE_RESTART_INIT		2
4357#define ANEG_STATE_RESTART		3
4358#define ANEG_STATE_DISABLE_LINK_OK	4
4359#define ANEG_STATE_ABILITY_DETECT_INIT	5
4360#define ANEG_STATE_ABILITY_DETECT	6
4361#define ANEG_STATE_ACK_DETECT_INIT	7
4362#define ANEG_STATE_ACK_DETECT		8
4363#define ANEG_STATE_COMPLETE_ACK_INIT	9
4364#define ANEG_STATE_COMPLETE_ACK		10
4365#define ANEG_STATE_IDLE_DETECT_INIT	11
4366#define ANEG_STATE_IDLE_DETECT		12
4367#define ANEG_STATE_LINK_OK		13
4368#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4369#define ANEG_STATE_NEXT_PAGE_WAIT	15
4370
4371	u32 flags;
4372#define MR_AN_ENABLE		0x00000001
4373#define MR_RESTART_AN		0x00000002
4374#define MR_AN_COMPLETE		0x00000004
4375#define MR_PAGE_RX		0x00000008
4376#define MR_NP_LOADED		0x00000010
4377#define MR_TOGGLE_TX		0x00000020
4378#define MR_LP_ADV_FULL_DUPLEX	0x00000040
4379#define MR_LP_ADV_HALF_DUPLEX	0x00000080
4380#define MR_LP_ADV_SYM_PAUSE	0x00000100
4381#define MR_LP_ADV_ASYM_PAUSE	0x00000200
4382#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4383#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4384#define MR_LP_ADV_NEXT_PAGE	0x00001000
4385#define MR_TOGGLE_RX		0x00002000
4386#define MR_NP_RX		0x00004000
4387
4388#define MR_LINK_OK		0x80000000
4389
4390	unsigned long link_time, cur_time;
4391
4392	u32 ability_match_cfg;
4393	int ability_match_count;
4394
4395	char ability_match, idle_match, ack_match;
4396
4397	u32 txconfig, rxconfig;
4398#define ANEG_CFG_NP		0x00000080
4399#define ANEG_CFG_ACK		0x00000040
4400#define ANEG_CFG_RF2		0x00000020
4401#define ANEG_CFG_RF1		0x00000010
4402#define ANEG_CFG_PS2		0x00000001
4403#define ANEG_CFG_PS1		0x00008000
4404#define ANEG_CFG_HD		0x00004000
4405#define ANEG_CFG_FD		0x00002000
4406#define ANEG_CFG_INVAL		0x00001f06
4407
4408};
4409#define ANEG_OK		0
4410#define ANEG_DONE	1
4411#define ANEG_TIMER_ENAB	2
4412#define ANEG_FAILED	-1
4413
4414#define ANEG_STATE_SETTLE_TIME	10000
4415
4416static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4417				   struct tg3_fiber_aneginfo *ap)
4418{
4419	u16 flowctrl;
4420	unsigned long delta;
4421	u32 rx_cfg_reg;
4422	int ret;
4423
4424	if (ap->state == ANEG_STATE_UNKNOWN) {
4425		ap->rxconfig = 0;
4426		ap->link_time = 0;
4427		ap->cur_time = 0;
4428		ap->ability_match_cfg = 0;
4429		ap->ability_match_count = 0;
4430		ap->ability_match = 0;
4431		ap->idle_match = 0;
4432		ap->ack_match = 0;
4433	}
4434	ap->cur_time++;
4435
4436	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4437		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4438
4439		if (rx_cfg_reg != ap->ability_match_cfg) {
4440			ap->ability_match_cfg = rx_cfg_reg;
4441			ap->ability_match = 0;
4442			ap->ability_match_count = 0;
4443		} else {
4444			if (++ap->ability_match_count > 1) {
4445				ap->ability_match = 1;
4446				ap->ability_match_cfg = rx_cfg_reg;
4447			}
4448		}
4449		if (rx_cfg_reg & ANEG_CFG_ACK)
4450			ap->ack_match = 1;
4451		else
4452			ap->ack_match = 0;
4453
4454		ap->idle_match = 0;
4455	} else {
4456		ap->idle_match = 1;
4457		ap->ability_match_cfg = 0;
4458		ap->ability_match_count = 0;
4459		ap->ability_match = 0;
4460		ap->ack_match = 0;
4461
4462		rx_cfg_reg = 0;
4463	}
4464
4465	ap->rxconfig = rx_cfg_reg;
4466	ret = ANEG_OK;
4467
4468	switch (ap->state) {
4469	case ANEG_STATE_UNKNOWN:
4470		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4471			ap->state = ANEG_STATE_AN_ENABLE;
4472
4473		/* fallthru */
4474	case ANEG_STATE_AN_ENABLE:
4475		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4476		if (ap->flags & MR_AN_ENABLE) {
4477			ap->link_time = 0;
4478			ap->cur_time = 0;
4479			ap->ability_match_cfg = 0;
4480			ap->ability_match_count = 0;
4481			ap->ability_match = 0;
4482			ap->idle_match = 0;
4483			ap->ack_match = 0;
4484
4485			ap->state = ANEG_STATE_RESTART_INIT;
4486		} else {
4487			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4488		}
4489		break;
4490
4491	case ANEG_STATE_RESTART_INIT:
4492		ap->link_time = ap->cur_time;
4493		ap->flags &= ~(MR_NP_LOADED);
4494		ap->txconfig = 0;
4495		tw32(MAC_TX_AUTO_NEG, 0);
4496		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4497		tw32_f(MAC_MODE, tp->mac_mode);
4498		udelay(40);
4499
4500		ret = ANEG_TIMER_ENAB;
4501		ap->state = ANEG_STATE_RESTART;
4502
4503		/* fallthru */
4504	case ANEG_STATE_RESTART:
4505		delta = ap->cur_time - ap->link_time;
4506		if (delta > ANEG_STATE_SETTLE_TIME)
4507			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4508		else
4509			ret = ANEG_TIMER_ENAB;
4510		break;
4511
4512	case ANEG_STATE_DISABLE_LINK_OK:
4513		ret = ANEG_DONE;
4514		break;
4515
4516	case ANEG_STATE_ABILITY_DETECT_INIT:
4517		ap->flags &= ~(MR_TOGGLE_TX);
4518		ap->txconfig = ANEG_CFG_FD;
4519		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4520		if (flowctrl & ADVERTISE_1000XPAUSE)
4521			ap->txconfig |= ANEG_CFG_PS1;
4522		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4523			ap->txconfig |= ANEG_CFG_PS2;
4524		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4525		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4526		tw32_f(MAC_MODE, tp->mac_mode);
4527		udelay(40);
4528
4529		ap->state = ANEG_STATE_ABILITY_DETECT;
4530		break;
4531
4532	case ANEG_STATE_ABILITY_DETECT:
4533		if (ap->ability_match != 0 && ap->rxconfig != 0)
4534			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4535		break;
4536
4537	case ANEG_STATE_ACK_DETECT_INIT:
4538		ap->txconfig |= ANEG_CFG_ACK;
4539		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4540		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4541		tw32_f(MAC_MODE, tp->mac_mode);
4542		udelay(40);
4543
4544		ap->state = ANEG_STATE_ACK_DETECT;
4545
4546		/* fallthru */
4547	case ANEG_STATE_ACK_DETECT:
4548		if (ap->ack_match != 0) {
4549			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4550			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4551				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4552			} else {
4553				ap->state = ANEG_STATE_AN_ENABLE;
4554			}
4555		} else if (ap->ability_match != 0 &&
4556			   ap->rxconfig == 0) {
4557			ap->state = ANEG_STATE_AN_ENABLE;
4558		}
4559		break;
4560
4561	case ANEG_STATE_COMPLETE_ACK_INIT:
4562		if (ap->rxconfig & ANEG_CFG_INVAL) {
4563			ret = ANEG_FAILED;
4564			break;
4565		}
4566		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4567			       MR_LP_ADV_HALF_DUPLEX |
4568			       MR_LP_ADV_SYM_PAUSE |
4569			       MR_LP_ADV_ASYM_PAUSE |
4570			       MR_LP_ADV_REMOTE_FAULT1 |
4571			       MR_LP_ADV_REMOTE_FAULT2 |
4572			       MR_LP_ADV_NEXT_PAGE |
4573			       MR_TOGGLE_RX |
4574			       MR_NP_RX);
4575		if (ap->rxconfig & ANEG_CFG_FD)
4576			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4577		if (ap->rxconfig & ANEG_CFG_HD)
4578			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4579		if (ap->rxconfig & ANEG_CFG_PS1)
4580			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4581		if (ap->rxconfig & ANEG_CFG_PS2)
4582			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4583		if (ap->rxconfig & ANEG_CFG_RF1)
4584			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4585		if (ap->rxconfig & ANEG_CFG_RF2)
4586			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4587		if (ap->rxconfig & ANEG_CFG_NP)
4588			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4589
4590		ap->link_time = ap->cur_time;
4591
4592		ap->flags ^= (MR_TOGGLE_TX);
4593		if (ap->rxconfig & 0x0008)
4594			ap->flags |= MR_TOGGLE_RX;
4595		if (ap->rxconfig & ANEG_CFG_NP)
4596			ap->flags |= MR_NP_RX;
4597		ap->flags |= MR_PAGE_RX;
4598
4599		ap->state = ANEG_STATE_COMPLETE_ACK;
4600		ret = ANEG_TIMER_ENAB;
4601		break;
4602
4603	case ANEG_STATE_COMPLETE_ACK:
4604		if (ap->ability_match != 0 &&
4605		    ap->rxconfig == 0) {
4606			ap->state = ANEG_STATE_AN_ENABLE;
4607			break;
4608		}
4609		delta = ap->cur_time - ap->link_time;
4610		if (delta > ANEG_STATE_SETTLE_TIME) {
4611			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4612				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4613			} else {
4614				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4615				    !(ap->flags & MR_NP_RX)) {
4616					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4617				} else {
4618					ret = ANEG_FAILED;
4619				}
4620			}
4621		}
4622		break;
4623
4624	case ANEG_STATE_IDLE_DETECT_INIT:
4625		ap->link_time = ap->cur_time;
4626		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4627		tw32_f(MAC_MODE, tp->mac_mode);
4628		udelay(40);
4629
4630		ap->state = ANEG_STATE_IDLE_DETECT;
4631		ret = ANEG_TIMER_ENAB;
4632		break;
4633
4634	case ANEG_STATE_IDLE_DETECT:
4635		if (ap->ability_match != 0 &&
4636		    ap->rxconfig == 0) {
4637			ap->state = ANEG_STATE_AN_ENABLE;
4638			break;
4639		}
4640		delta = ap->cur_time - ap->link_time;
4641		if (delta > ANEG_STATE_SETTLE_TIME) {
4642			/* XXX another gem from the Broadcom driver :( */
4643			ap->state = ANEG_STATE_LINK_OK;
4644		}
4645		break;
4646
4647	case ANEG_STATE_LINK_OK:
4648		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4649		ret = ANEG_DONE;
4650		break;
4651
4652	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4653		/* ??? unimplemented */
4654		break;
4655
4656	case ANEG_STATE_NEXT_PAGE_WAIT:
4657		/* ??? unimplemented */
4658		break;
4659
4660	default:
4661		ret = ANEG_FAILED;
4662		break;
4663	}
4664
4665	return ret;
4666}
4667
4668static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4669{
4670	int res = 0;
4671	struct tg3_fiber_aneginfo aninfo;
4672	int status = ANEG_FAILED;
4673	unsigned int tick;
4674	u32 tmp;
4675
4676	tw32_f(MAC_TX_AUTO_NEG, 0);
4677
4678	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4679	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4680	udelay(40);
4681
4682	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4683	udelay(40);
4684
4685	memset(&aninfo, 0, sizeof(aninfo));
4686	aninfo.flags |= MR_AN_ENABLE;
4687	aninfo.state = ANEG_STATE_UNKNOWN;
4688	aninfo.cur_time = 0;
4689	tick = 0;
4690	while (++tick < 195000) {
4691		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4692		if (status == ANEG_DONE || status == ANEG_FAILED)
4693			break;
4694
4695		udelay(1);
4696	}
4697
4698	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4699	tw32_f(MAC_MODE, tp->mac_mode);
4700	udelay(40);
4701
4702	*txflags = aninfo.txconfig;
4703	*rxflags = aninfo.flags;
4704
4705	if (status == ANEG_DONE &&
4706	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4707			     MR_LP_ADV_FULL_DUPLEX)))
4708		res = 1;
4709
4710	return res;
4711}
4712
4713static void tg3_init_bcm8002(struct tg3 *tp)
4714{
4715	u32 mac_status = tr32(MAC_STATUS);
4716	int i;
4717
4718	/* Reset when initting first time or we have a link. */
4719	if (tg3_flag(tp, INIT_COMPLETE) &&
4720	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4721		return;
4722
4723	/* Set PLL lock range. */
4724	tg3_writephy(tp, 0x16, 0x8007);
4725
4726	/* SW reset */
4727	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4728
4729	/* Wait for reset to complete. */
4730	/* XXX schedule_timeout() ... */
4731	for (i = 0; i < 500; i++)
4732		udelay(10);
4733
4734	/* Config mode; select PMA/Ch 1 regs. */
4735	tg3_writephy(tp, 0x10, 0x8411);
4736
4737	/* Enable auto-lock and comdet, select txclk for tx. */
4738	tg3_writephy(tp, 0x11, 0x0a10);
4739
4740	tg3_writephy(tp, 0x18, 0x00a0);
4741	tg3_writephy(tp, 0x16, 0x41ff);
4742
4743	/* Assert and deassert POR. */
4744	tg3_writephy(tp, 0x13, 0x0400);
4745	udelay(40);
4746	tg3_writephy(tp, 0x13, 0x0000);
4747
4748	tg3_writephy(tp, 0x11, 0x0a50);
4749	udelay(40);
4750	tg3_writephy(tp, 0x11, 0x0a10);
4751
4752	/* Wait for signal to stabilize */
4753	/* XXX schedule_timeout() ... */
4754	for (i = 0; i < 15000; i++)
4755		udelay(10);
4756
4757	/* Deselect the channel register so we can read the PHYID
4758	 * later.
4759	 */
4760	tg3_writephy(tp, 0x10, 0x8011);
4761}
4762
4763static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4764{
4765	u16 flowctrl;
4766	u32 sg_dig_ctrl, sg_dig_status;
4767	u32 serdes_cfg, expected_sg_dig_ctrl;
4768	int workaround, port_a;
4769	int current_link_up;
4770
4771	serdes_cfg = 0;
4772	expected_sg_dig_ctrl = 0;
4773	workaround = 0;
4774	port_a = 1;
4775	current_link_up = 0;
4776
4777	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4778	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4779		workaround = 1;
4780		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4781			port_a = 0;
4782
4783		/* preserve bits 0-11,13,14 for signal pre-emphasis */
4784		/* preserve bits 20-23 for voltage regulator */
4785		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4786	}
4787
4788	sg_dig_ctrl = tr32(SG_DIG_CTRL);
4789
4790	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4791		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4792			if (workaround) {
4793				u32 val = serdes_cfg;
4794
4795				if (port_a)
4796					val |= 0xc010000;
4797				else
4798					val |= 0x4010000;
4799				tw32_f(MAC_SERDES_CFG, val);
4800			}
4801
4802			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4803		}
4804		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4805			tg3_setup_flow_control(tp, 0, 0);
4806			current_link_up = 1;
4807		}
4808		goto out;
4809	}
4810
4811	/* Want auto-negotiation.  */
4812	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4813
4814	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4815	if (flowctrl & ADVERTISE_1000XPAUSE)
4816		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4817	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4818		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4819
4820	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4821		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4822		    tp->serdes_counter &&
4823		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4824				    MAC_STATUS_RCVD_CFG)) ==
4825		     MAC_STATUS_PCS_SYNCED)) {
4826			tp->serdes_counter--;
4827			current_link_up = 1;
4828			goto out;
4829		}
4830restart_autoneg:
4831		if (workaround)
4832			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4833		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4834		udelay(5);
4835		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4836
4837		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4838		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4839	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4840				 MAC_STATUS_SIGNAL_DET)) {
4841		sg_dig_status = tr32(SG_DIG_STATUS);
4842		mac_status = tr32(MAC_STATUS);
4843
4844		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4845		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4846			u32 local_adv = 0, remote_adv = 0;
4847
4848			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4849				local_adv |= ADVERTISE_1000XPAUSE;
4850			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4851				local_adv |= ADVERTISE_1000XPSE_ASYM;
4852
4853			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4854				remote_adv |= LPA_1000XPAUSE;
4855			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4856				remote_adv |= LPA_1000XPAUSE_ASYM;
4857
4858			tp->link_config.rmt_adv =
4859					   mii_adv_to_ethtool_adv_x(remote_adv);
4860
4861			tg3_setup_flow_control(tp, local_adv, remote_adv);
4862			current_link_up = 1;
4863			tp->serdes_counter = 0;
4864			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4865		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4866			if (tp->serdes_counter)
4867				tp->serdes_counter--;
4868			else {
4869				if (workaround) {
4870					u32 val = serdes_cfg;
4871
4872					if (port_a)
4873						val |= 0xc010000;
4874					else
4875						val |= 0x4010000;
4876
4877					tw32_f(MAC_SERDES_CFG, val);
4878				}
4879
4880				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4881				udelay(40);
4882
4883				/* Link parallel detection - link is up */
4884				/* only if we have PCS_SYNC and not */
4885				/* receiving config code words */
4886				mac_status = tr32(MAC_STATUS);
4887				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4888				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4889					tg3_setup_flow_control(tp, 0, 0);
4890					current_link_up = 1;
4891					tp->phy_flags |=
4892						TG3_PHYFLG_PARALLEL_DETECT;
4893					tp->serdes_counter =
4894						SERDES_PARALLEL_DET_TIMEOUT;
4895				} else
4896					goto restart_autoneg;
4897			}
4898		}
4899	} else {
4900		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4901		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4902	}
4903
4904out:
4905	return current_link_up;
4906}
4907
4908static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4909{
4910	int current_link_up = 0;
4911
4912	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4913		goto out;
4914
4915	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916		u32 txflags, rxflags;
4917		int i;
4918
4919		if (fiber_autoneg(tp, &txflags, &rxflags)) {
4920			u32 local_adv = 0, remote_adv = 0;
4921
4922			if (txflags & ANEG_CFG_PS1)
4923				local_adv |= ADVERTISE_1000XPAUSE;
4924			if (txflags & ANEG_CFG_PS2)
4925				local_adv |= ADVERTISE_1000XPSE_ASYM;
4926
4927			if (rxflags & MR_LP_ADV_SYM_PAUSE)
4928				remote_adv |= LPA_1000XPAUSE;
4929			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4930				remote_adv |= LPA_1000XPAUSE_ASYM;
4931
4932			tp->link_config.rmt_adv =
4933					   mii_adv_to_ethtool_adv_x(remote_adv);
4934
4935			tg3_setup_flow_control(tp, local_adv, remote_adv);
4936
4937			current_link_up = 1;
4938		}
4939		for (i = 0; i < 30; i++) {
4940			udelay(20);
4941			tw32_f(MAC_STATUS,
4942			       (MAC_STATUS_SYNC_CHANGED |
4943				MAC_STATUS_CFG_CHANGED));
4944			udelay(40);
4945			if ((tr32(MAC_STATUS) &
4946			     (MAC_STATUS_SYNC_CHANGED |
4947			      MAC_STATUS_CFG_CHANGED)) == 0)
4948				break;
4949		}
4950
4951		mac_status = tr32(MAC_STATUS);
4952		if (current_link_up == 0 &&
4953		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
4954		    !(mac_status & MAC_STATUS_RCVD_CFG))
4955			current_link_up = 1;
4956	} else {
4957		tg3_setup_flow_control(tp, 0, 0);
4958
4959		/* Forcing 1000FD link up. */
4960		current_link_up = 1;
4961
4962		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4963		udelay(40);
4964
4965		tw32_f(MAC_MODE, tp->mac_mode);
4966		udelay(40);
4967	}
4968
4969out:
4970	return current_link_up;
4971}
4972
4973static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4974{
4975	u32 orig_pause_cfg;
4976	u16 orig_active_speed;
4977	u8 orig_active_duplex;
4978	u32 mac_status;
4979	int current_link_up;
4980	int i;
4981
4982	orig_pause_cfg = tp->link_config.active_flowctrl;
4983	orig_active_speed = tp->link_config.active_speed;
4984	orig_active_duplex = tp->link_config.active_duplex;
4985
4986	if (!tg3_flag(tp, HW_AUTONEG) &&
4987	    netif_carrier_ok(tp->dev) &&
4988	    tg3_flag(tp, INIT_COMPLETE)) {
4989		mac_status = tr32(MAC_STATUS);
4990		mac_status &= (MAC_STATUS_PCS_SYNCED |
4991			       MAC_STATUS_SIGNAL_DET |
4992			       MAC_STATUS_CFG_CHANGED |
4993			       MAC_STATUS_RCVD_CFG);
4994		if (mac_status == (MAC_STATUS_PCS_SYNCED |
4995				   MAC_STATUS_SIGNAL_DET)) {
4996			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4997					    MAC_STATUS_CFG_CHANGED));
4998			return 0;
4999		}
5000	}
5001
5002	tw32_f(MAC_TX_AUTO_NEG, 0);
5003
5004	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5005	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5006	tw32_f(MAC_MODE, tp->mac_mode);
5007	udelay(40);
5008
5009	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5010		tg3_init_bcm8002(tp);
5011
5012	/* Enable link change event even when serdes polling.  */
5013	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5014	udelay(40);
5015
5016	current_link_up = 0;
5017	tp->link_config.rmt_adv = 0;
5018	mac_status = tr32(MAC_STATUS);
5019
5020	if (tg3_flag(tp, HW_AUTONEG))
5021		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5022	else
5023		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5024
5025	tp->napi[0].hw_status->status =
5026		(SD_STATUS_UPDATED |
5027		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5028
5029	for (i = 0; i < 100; i++) {
5030		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5031				    MAC_STATUS_CFG_CHANGED));
5032		udelay(5);
5033		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5034					 MAC_STATUS_CFG_CHANGED |
5035					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5036			break;
5037	}
5038
5039	mac_status = tr32(MAC_STATUS);
5040	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5041		current_link_up = 0;
5042		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5043		    tp->serdes_counter == 0) {
5044			tw32_f(MAC_MODE, (tp->mac_mode |
5045					  MAC_MODE_SEND_CONFIGS));
5046			udelay(1);
5047			tw32_f(MAC_MODE, tp->mac_mode);
5048		}
5049	}
5050
5051	if (current_link_up == 1) {
5052		tp->link_config.active_speed = SPEED_1000;
5053		tp->link_config.active_duplex = DUPLEX_FULL;
5054		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055				    LED_CTRL_LNKLED_OVERRIDE |
5056				    LED_CTRL_1000MBPS_ON));
5057	} else {
5058		tp->link_config.active_speed = SPEED_UNKNOWN;
5059		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5060		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5061				    LED_CTRL_LNKLED_OVERRIDE |
5062				    LED_CTRL_TRAFFIC_OVERRIDE));
5063	}
5064
5065	if (current_link_up != netif_carrier_ok(tp->dev)) {
5066		if (current_link_up)
5067			netif_carrier_on(tp->dev);
5068		else
5069			netif_carrier_off(tp->dev);
5070		tg3_link_report(tp);
5071	} else {
5072		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5073		if (orig_pause_cfg != now_pause_cfg ||
5074		    orig_active_speed != tp->link_config.active_speed ||
5075		    orig_active_duplex != tp->link_config.active_duplex)
5076			tg3_link_report(tp);
5077	}
5078
5079	return 0;
5080}
5081
5082static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5083{
5084	int current_link_up, err = 0;
5085	u32 bmsr, bmcr;
5086	u16 current_speed;
5087	u8 current_duplex;
5088	u32 local_adv, remote_adv;
5089
5090	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5091	tw32_f(MAC_MODE, tp->mac_mode);
5092	udelay(40);
5093
5094	tw32(MAC_EVENT, 0);
5095
5096	tw32_f(MAC_STATUS,
5097	     (MAC_STATUS_SYNC_CHANGED |
5098	      MAC_STATUS_CFG_CHANGED |
5099	      MAC_STATUS_MI_COMPLETION |
5100	      MAC_STATUS_LNKSTATE_CHANGED));
5101	udelay(40);
5102
5103	if (force_reset)
5104		tg3_phy_reset(tp);
5105
5106	current_link_up = 0;
5107	current_speed = SPEED_UNKNOWN;
5108	current_duplex = DUPLEX_UNKNOWN;
5109	tp->link_config.rmt_adv = 0;
5110
5111	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5112	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5114		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5115			bmsr |= BMSR_LSTATUS;
5116		else
5117			bmsr &= ~BMSR_LSTATUS;
5118	}
5119
5120	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5121
5122	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5123	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5124		/* do nothing, just check for link up at the end */
5125	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5126		u32 adv, newadv;
5127
5128		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5129		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5130				 ADVERTISE_1000XPAUSE |
5131				 ADVERTISE_1000XPSE_ASYM |
5132				 ADVERTISE_SLCT);
5133
5134		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5135		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5136
5137		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5138			tg3_writephy(tp, MII_ADVERTISE, newadv);
5139			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5140			tg3_writephy(tp, MII_BMCR, bmcr);
5141
5142			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5143			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5144			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5145
5146			return err;
5147		}
5148	} else {
5149		u32 new_bmcr;
5150
5151		bmcr &= ~BMCR_SPEED1000;
5152		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5153
5154		if (tp->link_config.duplex == DUPLEX_FULL)
5155			new_bmcr |= BMCR_FULLDPLX;
5156
5157		if (new_bmcr != bmcr) {
5158			/* BMCR_SPEED1000 is a reserved bit that needs
5159			 * to be set on write.
5160			 */
5161			new_bmcr |= BMCR_SPEED1000;
5162
5163			/* Force a linkdown */
5164			if (netif_carrier_ok(tp->dev)) {
5165				u32 adv;
5166
5167				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5168				adv &= ~(ADVERTISE_1000XFULL |
5169					 ADVERTISE_1000XHALF |
5170					 ADVERTISE_SLCT);
5171				tg3_writephy(tp, MII_ADVERTISE, adv);
5172				tg3_writephy(tp, MII_BMCR, bmcr |
5173							   BMCR_ANRESTART |
5174							   BMCR_ANENABLE);
5175				udelay(10);
5176				netif_carrier_off(tp->dev);
5177			}
5178			tg3_writephy(tp, MII_BMCR, new_bmcr);
5179			bmcr = new_bmcr;
5180			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5181			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5183			    ASIC_REV_5714) {
5184				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5185					bmsr |= BMSR_LSTATUS;
5186				else
5187					bmsr &= ~BMSR_LSTATUS;
5188			}
5189			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5190		}
5191	}
5192
5193	if (bmsr & BMSR_LSTATUS) {
5194		current_speed = SPEED_1000;
5195		current_link_up = 1;
5196		if (bmcr & BMCR_FULLDPLX)
5197			current_duplex = DUPLEX_FULL;
5198		else
5199			current_duplex = DUPLEX_HALF;
5200
5201		local_adv = 0;
5202		remote_adv = 0;
5203
5204		if (bmcr & BMCR_ANENABLE) {
5205			u32 common;
5206
5207			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5208			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5209			common = local_adv & remote_adv;
5210			if (common & (ADVERTISE_1000XHALF |
5211				      ADVERTISE_1000XFULL)) {
5212				if (common & ADVERTISE_1000XFULL)
5213					current_duplex = DUPLEX_FULL;
5214				else
5215					current_duplex = DUPLEX_HALF;
5216
5217				tp->link_config.rmt_adv =
5218					   mii_adv_to_ethtool_adv_x(remote_adv);
5219			} else if (!tg3_flag(tp, 5780_CLASS)) {
5220				/* Link is up via parallel detect */
5221			} else {
5222				current_link_up = 0;
5223			}
5224		}
5225	}
5226
5227	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5228		tg3_setup_flow_control(tp, local_adv, remote_adv);
5229
5230	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5231	if (tp->link_config.active_duplex == DUPLEX_HALF)
5232		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5233
5234	tw32_f(MAC_MODE, tp->mac_mode);
5235	udelay(40);
5236
5237	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5238
5239	tp->link_config.active_speed = current_speed;
5240	tp->link_config.active_duplex = current_duplex;
5241
5242	if (current_link_up != netif_carrier_ok(tp->dev)) {
5243		if (current_link_up)
5244			netif_carrier_on(tp->dev);
5245		else {
5246			netif_carrier_off(tp->dev);
5247			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5248		}
5249		tg3_link_report(tp);
5250	}
5251	return err;
5252}
5253
5254static void tg3_serdes_parallel_detect(struct tg3 *tp)
5255{
5256	if (tp->serdes_counter) {
5257		/* Give autoneg time to complete. */
5258		tp->serdes_counter--;
5259		return;
5260	}
5261
5262	if (!netif_carrier_ok(tp->dev) &&
5263	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5264		u32 bmcr;
5265
5266		tg3_readphy(tp, MII_BMCR, &bmcr);
5267		if (bmcr & BMCR_ANENABLE) {
5268			u32 phy1, phy2;
5269
5270			/* Select shadow register 0x1f */
5271			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5272			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5273
5274			/* Select expansion interrupt status register */
5275			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5276					 MII_TG3_DSP_EXP1_INT_STAT);
5277			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5278			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5279
5280			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5281				/* We have signal detect and not receiving
5282				 * config code words, link is up by parallel
5283				 * detection.
5284				 */
5285
5286				bmcr &= ~BMCR_ANENABLE;
5287				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5288				tg3_writephy(tp, MII_BMCR, bmcr);
5289				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5290			}
5291		}
5292	} else if (netif_carrier_ok(tp->dev) &&
5293		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5294		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5295		u32 phy2;
5296
5297		/* Select expansion interrupt status register */
5298		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5299				 MII_TG3_DSP_EXP1_INT_STAT);
5300		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5301		if (phy2 & 0x20) {
5302			u32 bmcr;
5303
5304			/* Config code words received, turn on autoneg. */
5305			tg3_readphy(tp, MII_BMCR, &bmcr);
5306			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5307
5308			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5309
5310		}
5311	}
5312}
5313
5314static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5315{
5316	u32 val;
5317	int err;
5318
5319	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5320		err = tg3_setup_fiber_phy(tp, force_reset);
5321	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5322		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5323	else
5324		err = tg3_setup_copper_phy(tp, force_reset);
5325
5326	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5327		u32 scale;
5328
5329		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5330		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5331			scale = 65;
5332		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5333			scale = 6;
5334		else
5335			scale = 12;
5336
5337		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5338		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5339		tw32(GRC_MISC_CFG, val);
5340	}
5341
5342	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5343	      (6 << TX_LENGTHS_IPG_SHIFT);
5344	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5345		val |= tr32(MAC_TX_LENGTHS) &
5346		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5347			TX_LENGTHS_CNT_DWN_VAL_MSK);
5348
5349	if (tp->link_config.active_speed == SPEED_1000 &&
5350	    tp->link_config.active_duplex == DUPLEX_HALF)
5351		tw32(MAC_TX_LENGTHS, val |
5352		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5353	else
5354		tw32(MAC_TX_LENGTHS, val |
5355		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5356
5357	if (!tg3_flag(tp, 5705_PLUS)) {
5358		if (netif_carrier_ok(tp->dev)) {
5359			tw32(HOSTCC_STAT_COAL_TICKS,
5360			     tp->coal.stats_block_coalesce_usecs);
5361		} else {
5362			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5363		}
5364	}
5365
5366	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5367		val = tr32(PCIE_PWR_MGMT_THRESH);
5368		if (!netif_carrier_ok(tp->dev))
5369			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5370			      tp->pwrmgmt_thresh;
5371		else
5372			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5373		tw32(PCIE_PWR_MGMT_THRESH, val);
5374	}
5375
5376	return err;
5377}
5378
5379static inline int tg3_irq_sync(struct tg3 *tp)
5380{
5381	return tp->irq_sync;
5382}
5383
5384static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5385{
5386	int i;
5387
5388	dst = (u32 *)((u8 *)dst + off);
5389	for (i = 0; i < len; i += sizeof(u32))
5390		*dst++ = tr32(off + i);
5391}
5392
5393static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5394{
5395	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5396	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5397	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5398	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5399	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5400	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5401	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5402	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5403	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5404	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5405	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5406	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5407	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5408	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5409	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5410	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5411	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5412	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5413	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5414
5415	if (tg3_flag(tp, SUPPORT_MSIX))
5416		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5417
5418	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5419	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5420	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5421	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5422	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5423	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5424	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5425	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5426
5427	if (!tg3_flag(tp, 5705_PLUS)) {
5428		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5429		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5430		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5431	}
5432
5433	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5434	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5435	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5436	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5437	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5438
5439	if (tg3_flag(tp, NVRAM))
5440		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5441}
5442
5443static void tg3_dump_state(struct tg3 *tp)
5444{
5445	int i;
5446	u32 *regs;
5447
5448	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5449	if (!regs) {
5450		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5451		return;
5452	}
5453
5454	if (tg3_flag(tp, PCI_EXPRESS)) {
5455		/* Read up to but not including private PCI registers */
5456		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5457			regs[i / sizeof(u32)] = tr32(i);
5458	} else
5459		tg3_dump_legacy_regs(tp, regs);
5460
5461	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5462		if (!regs[i + 0] && !regs[i + 1] &&
5463		    !regs[i + 2] && !regs[i + 3])
5464			continue;
5465
5466		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5467			   i * 4,
5468			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5469	}
5470
5471	kfree(regs);
5472
5473	for (i = 0; i < tp->irq_cnt; i++) {
5474		struct tg3_napi *tnapi = &tp->napi[i];
5475
5476		/* SW status block */
5477		netdev_err(tp->dev,
5478			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5479			   i,
5480			   tnapi->hw_status->status,
5481			   tnapi->hw_status->status_tag,
5482			   tnapi->hw_status->rx_jumbo_consumer,
5483			   tnapi->hw_status->rx_consumer,
5484			   tnapi->hw_status->rx_mini_consumer,
5485			   tnapi->hw_status->idx[0].rx_producer,
5486			   tnapi->hw_status->idx[0].tx_consumer);
5487
5488		netdev_err(tp->dev,
5489		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5490			   i,
5491			   tnapi->last_tag, tnapi->last_irq_tag,
5492			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5493			   tnapi->rx_rcb_ptr,
5494			   tnapi->prodring.rx_std_prod_idx,
5495			   tnapi->prodring.rx_std_cons_idx,
5496			   tnapi->prodring.rx_jmb_prod_idx,
5497			   tnapi->prodring.rx_jmb_cons_idx);
5498	}
5499}
5500
5501/* This is called whenever we suspect that the system chipset is re-
5502 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5503 * is bogus tx completions. We try to recover by setting the
5504 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5505 * in the workqueue.
5506 */
5507static void tg3_tx_recover(struct tg3 *tp)
5508{
5509	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5510	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5511
5512	netdev_warn(tp->dev,
5513		    "The system may be re-ordering memory-mapped I/O "
5514		    "cycles to the network device, attempting to recover. "
5515		    "Please report the problem to the driver maintainer "
5516		    "and include system chipset information.\n");
5517
5518	spin_lock(&tp->lock);
5519	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5520	spin_unlock(&tp->lock);
5521}
5522
5523static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5524{
5525	/* Tell compiler to fetch tx indices from memory. */
5526	barrier();
5527	return tnapi->tx_pending -
5528	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5529}
5530
5531/* Tigon3 never reports partial packet sends.  So we do not
5532 * need special logic to handle SKBs that have not had all
5533 * of their frags sent yet, like SunGEM does.
5534 */
5535static void tg3_tx(struct tg3_napi *tnapi)
5536{
5537	struct tg3 *tp = tnapi->tp;
5538	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5539	u32 sw_idx = tnapi->tx_cons;
5540	struct netdev_queue *txq;
5541	int index = tnapi - tp->napi;
5542	unsigned int pkts_compl = 0, bytes_compl = 0;
5543
5544	if (tg3_flag(tp, ENABLE_TSS))
5545		index--;
5546
5547	txq = netdev_get_tx_queue(tp->dev, index);
5548
5549	while (sw_idx != hw_idx) {
5550		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5551		struct sk_buff *skb = ri->skb;
5552		int i, tx_bug = 0;
5553
5554		if (unlikely(skb == NULL)) {
5555			tg3_tx_recover(tp);
5556			return;
5557		}
5558
5559		pci_unmap_single(tp->pdev,
5560				 dma_unmap_addr(ri, mapping),
5561				 skb_headlen(skb),
5562				 PCI_DMA_TODEVICE);
5563
5564		ri->skb = NULL;
5565
5566		while (ri->fragmented) {
5567			ri->fragmented = false;
5568			sw_idx = NEXT_TX(sw_idx);
5569			ri = &tnapi->tx_buffers[sw_idx];
5570		}
5571
5572		sw_idx = NEXT_TX(sw_idx);
5573
5574		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5575			ri = &tnapi->tx_buffers[sw_idx];
5576			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5577				tx_bug = 1;
5578
5579			pci_unmap_page(tp->pdev,
5580				       dma_unmap_addr(ri, mapping),
5581				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5582				       PCI_DMA_TODEVICE);
5583
5584			while (ri->fragmented) {
5585				ri->fragmented = false;
5586				sw_idx = NEXT_TX(sw_idx);
5587				ri = &tnapi->tx_buffers[sw_idx];
5588			}
5589
5590			sw_idx = NEXT_TX(sw_idx);
5591		}
5592
5593		pkts_compl++;
5594		bytes_compl += skb->len;
5595
5596		dev_kfree_skb(skb);
5597
5598		if (unlikely(tx_bug)) {
5599			tg3_tx_recover(tp);
5600			return;
5601		}
5602	}
5603
5604	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5605
5606	tnapi->tx_cons = sw_idx;
5607
5608	/* Need to make the tx_cons update visible to tg3_start_xmit()
5609	 * before checking for netif_queue_stopped().  Without the
5610	 * memory barrier, there is a small possibility that tg3_start_xmit()
5611	 * will miss it and cause the queue to be stopped forever.
5612	 */
5613	smp_mb();
5614
5615	if (unlikely(netif_tx_queue_stopped(txq) &&
5616		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5617		__netif_tx_lock(txq, smp_processor_id());
5618		if (netif_tx_queue_stopped(txq) &&
5619		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5620			netif_tx_wake_queue(txq);
5621		__netif_tx_unlock(txq);
5622	}
5623}
5624
5625static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5626{
5627	if (!ri->data)
5628		return;
5629
5630	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5631			 map_sz, PCI_DMA_FROMDEVICE);
5632	kfree(ri->data);
5633	ri->data = NULL;
5634}
5635
5636/* Returns size of skb allocated or < 0 on error.
5637 *
5638 * We only need to fill in the address because the other members
5639 * of the RX descriptor are invariant, see tg3_init_rings.
5640 *
5641 * Note the purposeful assymetry of cpu vs. chip accesses.  For
5642 * posting buffers we only dirty the first cache line of the RX
5643 * descriptor (containing the address).  Whereas for the RX status
5644 * buffers the cpu only reads the last cacheline of the RX descriptor
5645 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5646 */
5647static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5648			    u32 opaque_key, u32 dest_idx_unmasked)
5649{
5650	struct tg3_rx_buffer_desc *desc;
5651	struct ring_info *map;
5652	u8 *data;
5653	dma_addr_t mapping;
5654	int skb_size, data_size, dest_idx;
5655
5656	switch (opaque_key) {
5657	case RXD_OPAQUE_RING_STD:
5658		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5659		desc = &tpr->rx_std[dest_idx];
5660		map = &tpr->rx_std_buffers[dest_idx];
5661		data_size = tp->rx_pkt_map_sz;
5662		break;
5663
5664	case RXD_OPAQUE_RING_JUMBO:
5665		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5666		desc = &tpr->rx_jmb[dest_idx].std;
5667		map = &tpr->rx_jmb_buffers[dest_idx];
5668		data_size = TG3_RX_JMB_MAP_SZ;
5669		break;
5670
5671	default:
5672		return -EINVAL;
5673	}
5674
5675	/* Do not overwrite any of the map or rp information
5676	 * until we are sure we can commit to a new buffer.
5677	 *
5678	 * Callers depend upon this behavior and assume that
5679	 * we leave everything unchanged if we fail.
5680	 */
5681	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5682		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5683	data = kmalloc(skb_size, GFP_ATOMIC);
5684	if (!data)
5685		return -ENOMEM;
5686
5687	mapping = pci_map_single(tp->pdev,
5688				 data + TG3_RX_OFFSET(tp),
5689				 data_size,
5690				 PCI_DMA_FROMDEVICE);
5691	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5692		kfree(data);
5693		return -EIO;
5694	}
5695
5696	map->data = data;
5697	dma_unmap_addr_set(map, mapping, mapping);
5698
5699	desc->addr_hi = ((u64)mapping >> 32);
5700	desc->addr_lo = ((u64)mapping & 0xffffffff);
5701
5702	return data_size;
5703}
5704
5705/* We only need to move over in the address because the other
5706 * members of the RX descriptor are invariant.  See notes above
5707 * tg3_alloc_rx_data for full details.
5708 */
5709static void tg3_recycle_rx(struct tg3_napi *tnapi,
5710			   struct tg3_rx_prodring_set *dpr,
5711			   u32 opaque_key, int src_idx,
5712			   u32 dest_idx_unmasked)
5713{
5714	struct tg3 *tp = tnapi->tp;
5715	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5716	struct ring_info *src_map, *dest_map;
5717	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5718	int dest_idx;
5719
5720	switch (opaque_key) {
5721	case RXD_OPAQUE_RING_STD:
5722		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5723		dest_desc = &dpr->rx_std[dest_idx];
5724		dest_map = &dpr->rx_std_buffers[dest_idx];
5725		src_desc = &spr->rx_std[src_idx];
5726		src_map = &spr->rx_std_buffers[src_idx];
5727		break;
5728
5729	case RXD_OPAQUE_RING_JUMBO:
5730		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5731		dest_desc = &dpr->rx_jmb[dest_idx].std;
5732		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5733		src_desc = &spr->rx_jmb[src_idx].std;
5734		src_map = &spr->rx_jmb_buffers[src_idx];
5735		break;
5736
5737	default:
5738		return;
5739	}
5740
5741	dest_map->data = src_map->data;
5742	dma_unmap_addr_set(dest_map, mapping,
5743			   dma_unmap_addr(src_map, mapping));
5744	dest_desc->addr_hi = src_desc->addr_hi;
5745	dest_desc->addr_lo = src_desc->addr_lo;
5746
5747	/* Ensure that the update to the skb happens after the physical
5748	 * addresses have been transferred to the new BD location.
5749	 */
5750	smp_wmb();
5751
5752	src_map->data = NULL;
5753}
5754
5755/* The RX ring scheme is composed of multiple rings which post fresh
5756 * buffers to the chip, and one special ring the chip uses to report
5757 * status back to the host.
5758 *
5759 * The special ring reports the status of received packets to the
5760 * host.  The chip does not write into the original descriptor the
5761 * RX buffer was obtained from.  The chip simply takes the original
5762 * descriptor as provided by the host, updates the status and length
5763 * field, then writes this into the next status ring entry.
5764 *
5765 * Each ring the host uses to post buffers to the chip is described
5766 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5767 * it is first placed into the on-chip ram.  When the packet's length
5768 * is known, it walks down the TG3_BDINFO entries to select the ring.
5769 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5770 * which is within the range of the new packet's length is chosen.
5771 *
5772 * The "separate ring for rx status" scheme may sound queer, but it makes
5773 * sense from a cache coherency perspective.  If only the host writes
5774 * to the buffer post rings, and only the chip writes to the rx status
5775 * rings, then cache lines never move beyond shared-modified state.
5776 * If both the host and chip were to write into the same ring, cache line
5777 * eviction could occur since both entities want it in an exclusive state.
5778 */
5779static int tg3_rx(struct tg3_napi *tnapi, int budget)
5780{
5781	struct tg3 *tp = tnapi->tp;
5782	u32 work_mask, rx_std_posted = 0;
5783	u32 std_prod_idx, jmb_prod_idx;
5784	u32 sw_idx = tnapi->rx_rcb_ptr;
5785	u16 hw_idx;
5786	int received;
5787	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5788
5789	hw_idx = *(tnapi->rx_rcb_prod_idx);
5790	/*
5791	 * We need to order the read of hw_idx and the read of
5792	 * the opaque cookie.
5793	 */
5794	rmb();
5795	work_mask = 0;
5796	received = 0;
5797	std_prod_idx = tpr->rx_std_prod_idx;
5798	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5799	while (sw_idx != hw_idx && budget > 0) {
5800		struct ring_info *ri;
5801		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5802		unsigned int len;
5803		struct sk_buff *skb;
5804		dma_addr_t dma_addr;
5805		u32 opaque_key, desc_idx, *post_ptr;
5806		u8 *data;
5807
5808		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5809		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5810		if (opaque_key == RXD_OPAQUE_RING_STD) {
5811			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5812			dma_addr = dma_unmap_addr(ri, mapping);
5813			data = ri->data;
5814			post_ptr = &std_prod_idx;
5815			rx_std_posted++;
5816		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5817			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5818			dma_addr = dma_unmap_addr(ri, mapping);
5819			data = ri->data;
5820			post_ptr = &jmb_prod_idx;
5821		} else
5822			goto next_pkt_nopost;
5823
5824		work_mask |= opaque_key;
5825
5826		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5827		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5828		drop_it:
5829			tg3_recycle_rx(tnapi, tpr, opaque_key,
5830				       desc_idx, *post_ptr);
5831		drop_it_no_recycle:
5832			/* Other statistics kept track of by card. */
5833			tp->rx_dropped++;
5834			goto next_pkt;
5835		}
5836
5837		prefetch(data + TG3_RX_OFFSET(tp));
5838		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5839		      ETH_FCS_LEN;
5840
5841		if (len > TG3_RX_COPY_THRESH(tp)) {
5842			int skb_size;
5843
5844			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5845						    *post_ptr);
5846			if (skb_size < 0)
5847				goto drop_it;
5848
5849			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5850					 PCI_DMA_FROMDEVICE);
5851
5852			skb = build_skb(data);
5853			if (!skb) {
5854				kfree(data);
5855				goto drop_it_no_recycle;
5856			}
5857			skb_reserve(skb, TG3_RX_OFFSET(tp));
5858			/* Ensure that the update to the data happens
5859			 * after the usage of the old DMA mapping.
5860			 */
5861			smp_wmb();
5862
5863			ri->data = NULL;
5864
5865		} else {
5866			tg3_recycle_rx(tnapi, tpr, opaque_key,
5867				       desc_idx, *post_ptr);
5868
5869			skb = netdev_alloc_skb(tp->dev,
5870					       len + TG3_RAW_IP_ALIGN);
5871			if (skb == NULL)
5872				goto drop_it_no_recycle;
5873
5874			skb_reserve(skb, TG3_RAW_IP_ALIGN);
5875			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5876			memcpy(skb->data,
5877			       data + TG3_RX_OFFSET(tp),
5878			       len);
5879			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5880		}
5881
5882		skb_put(skb, len);
5883		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5884		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5885		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5886		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5887			skb->ip_summed = CHECKSUM_UNNECESSARY;
5888		else
5889			skb_checksum_none_assert(skb);
5890
5891		skb->protocol = eth_type_trans(skb, tp->dev);
5892
5893		if (len > (tp->dev->mtu + ETH_HLEN) &&
5894		    skb->protocol != htons(ETH_P_8021Q)) {
5895			dev_kfree_skb(skb);
5896			goto drop_it_no_recycle;
5897		}
5898
5899		if (desc->type_flags & RXD_FLAG_VLAN &&
5900		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5901			__vlan_hwaccel_put_tag(skb,
5902					       desc->err_vlan & RXD_VLAN_MASK);
5903
5904		napi_gro_receive(&tnapi->napi, skb);
5905
5906		received++;
5907		budget--;
5908
5909next_pkt:
5910		(*post_ptr)++;
5911
5912		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5913			tpr->rx_std_prod_idx = std_prod_idx &
5914					       tp->rx_std_ring_mask;
5915			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5916				     tpr->rx_std_prod_idx);
5917			work_mask &= ~RXD_OPAQUE_RING_STD;
5918			rx_std_posted = 0;
5919		}
5920next_pkt_nopost:
5921		sw_idx++;
5922		sw_idx &= tp->rx_ret_ring_mask;
5923
5924		/* Refresh hw_idx to see if there is new work */
5925		if (sw_idx == hw_idx) {
5926			hw_idx = *(tnapi->rx_rcb_prod_idx);
5927			rmb();
5928		}
5929	}
5930
5931	/* ACK the status ring. */
5932	tnapi->rx_rcb_ptr = sw_idx;
5933	tw32_rx_mbox(tnapi->consmbox, sw_idx);
5934
5935	/* Refill RX ring(s). */
5936	if (!tg3_flag(tp, ENABLE_RSS)) {
5937		/* Sync BD data before updating mailbox */
5938		wmb();
5939
5940		if (work_mask & RXD_OPAQUE_RING_STD) {
5941			tpr->rx_std_prod_idx = std_prod_idx &
5942					       tp->rx_std_ring_mask;
5943			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5944				     tpr->rx_std_prod_idx);
5945		}
5946		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5947			tpr->rx_jmb_prod_idx = jmb_prod_idx &
5948					       tp->rx_jmb_ring_mask;
5949			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5950				     tpr->rx_jmb_prod_idx);
5951		}
5952		mmiowb();
5953	} else if (work_mask) {
5954		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5955		 * updated before the producer indices can be updated.
5956		 */
5957		smp_wmb();
5958
5959		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5960		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5961
5962		if (tnapi != &tp->napi[1]) {
5963			tp->rx_refill = true;
5964			napi_schedule(&tp->napi[1].napi);
5965		}
5966	}
5967
5968	return received;
5969}
5970
5971static void tg3_poll_link(struct tg3 *tp)
5972{
5973	/* handle link change and other phy events */
5974	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5975		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5976
5977		if (sblk->status & SD_STATUS_LINK_CHG) {
5978			sblk->status = SD_STATUS_UPDATED |
5979				       (sblk->status & ~SD_STATUS_LINK_CHG);
5980			spin_lock(&tp->lock);
5981			if (tg3_flag(tp, USE_PHYLIB)) {
5982				tw32_f(MAC_STATUS,
5983				     (MAC_STATUS_SYNC_CHANGED |
5984				      MAC_STATUS_CFG_CHANGED |
5985				      MAC_STATUS_MI_COMPLETION |
5986				      MAC_STATUS_LNKSTATE_CHANGED));
5987				udelay(40);
5988			} else
5989				tg3_setup_phy(tp, 0);
5990			spin_unlock(&tp->lock);
5991		}
5992	}
5993}
5994
5995static int tg3_rx_prodring_xfer(struct tg3 *tp,
5996				struct tg3_rx_prodring_set *dpr,
5997				struct tg3_rx_prodring_set *spr)
5998{
5999	u32 si, di, cpycnt, src_prod_idx;
6000	int i, err = 0;
6001
6002	while (1) {
6003		src_prod_idx = spr->rx_std_prod_idx;
6004
6005		/* Make sure updates to the rx_std_buffers[] entries and the
6006		 * standard producer index are seen in the correct order.
6007		 */
6008		smp_rmb();
6009
6010		if (spr->rx_std_cons_idx == src_prod_idx)
6011			break;
6012
6013		if (spr->rx_std_cons_idx < src_prod_idx)
6014			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6015		else
6016			cpycnt = tp->rx_std_ring_mask + 1 -
6017				 spr->rx_std_cons_idx;
6018
6019		cpycnt = min(cpycnt,
6020			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6021
6022		si = spr->rx_std_cons_idx;
6023		di = dpr->rx_std_prod_idx;
6024
6025		for (i = di; i < di + cpycnt; i++) {
6026			if (dpr->rx_std_buffers[i].data) {
6027				cpycnt = i - di;
6028				err = -ENOSPC;
6029				break;
6030			}
6031		}
6032
6033		if (!cpycnt)
6034			break;
6035
6036		/* Ensure that updates to the rx_std_buffers ring and the
6037		 * shadowed hardware producer ring from tg3_recycle_skb() are
6038		 * ordered correctly WRT the skb check above.
6039		 */
6040		smp_rmb();
6041
6042		memcpy(&dpr->rx_std_buffers[di],
6043		       &spr->rx_std_buffers[si],
6044		       cpycnt * sizeof(struct ring_info));
6045
6046		for (i = 0; i < cpycnt; i++, di++, si++) {
6047			struct tg3_rx_buffer_desc *sbd, *dbd;
6048			sbd = &spr->rx_std[si];
6049			dbd = &dpr->rx_std[di];
6050			dbd->addr_hi = sbd->addr_hi;
6051			dbd->addr_lo = sbd->addr_lo;
6052		}
6053
6054		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6055				       tp->rx_std_ring_mask;
6056		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6057				       tp->rx_std_ring_mask;
6058	}
6059
6060	while (1) {
6061		src_prod_idx = spr->rx_jmb_prod_idx;
6062
6063		/* Make sure updates to the rx_jmb_buffers[] entries and
6064		 * the jumbo producer index are seen in the correct order.
6065		 */
6066		smp_rmb();
6067
6068		if (spr->rx_jmb_cons_idx == src_prod_idx)
6069			break;
6070
6071		if (spr->rx_jmb_cons_idx < src_prod_idx)
6072			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6073		else
6074			cpycnt = tp->rx_jmb_ring_mask + 1 -
6075				 spr->rx_jmb_cons_idx;
6076
6077		cpycnt = min(cpycnt,
6078			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6079
6080		si = spr->rx_jmb_cons_idx;
6081		di = dpr->rx_jmb_prod_idx;
6082
6083		for (i = di; i < di + cpycnt; i++) {
6084			if (dpr->rx_jmb_buffers[i].data) {
6085				cpycnt = i - di;
6086				err = -ENOSPC;
6087				break;
6088			}
6089		}
6090
6091		if (!cpycnt)
6092			break;
6093
6094		/* Ensure that updates to the rx_jmb_buffers ring and the
6095		 * shadowed hardware producer ring from tg3_recycle_skb() are
6096		 * ordered correctly WRT the skb check above.
6097		 */
6098		smp_rmb();
6099
6100		memcpy(&dpr->rx_jmb_buffers[di],
6101		       &spr->rx_jmb_buffers[si],
6102		       cpycnt * sizeof(struct ring_info));
6103
6104		for (i = 0; i < cpycnt; i++, di++, si++) {
6105			struct tg3_rx_buffer_desc *sbd, *dbd;
6106			sbd = &spr->rx_jmb[si].std;
6107			dbd = &dpr->rx_jmb[di].std;
6108			dbd->addr_hi = sbd->addr_hi;
6109			dbd->addr_lo = sbd->addr_lo;
6110		}
6111
6112		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6113				       tp->rx_jmb_ring_mask;
6114		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6115				       tp->rx_jmb_ring_mask;
6116	}
6117
6118	return err;
6119}
6120
6121static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6122{
6123	struct tg3 *tp = tnapi->tp;
6124
6125	/* run TX completion thread */
6126	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6127		tg3_tx(tnapi);
6128		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6129			return work_done;
6130	}
6131
6132	if (!tnapi->rx_rcb_prod_idx)
6133		return work_done;
6134
6135	/* run RX thread, within the bounds set by NAPI.
6136	 * All RX "locking" is done by ensuring outside
6137	 * code synchronizes with tg3->napi.poll()
6138	 */
6139	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6140		work_done += tg3_rx(tnapi, budget - work_done);
6141
6142	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6143		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6144		int i, err = 0;
6145		u32 std_prod_idx = dpr->rx_std_prod_idx;
6146		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6147
6148		tp->rx_refill = false;
6149		for (i = 1; i < tp->irq_cnt; i++)
6150			err |= tg3_rx_prodring_xfer(tp, dpr,
6151						    &tp->napi[i].prodring);
6152
6153		wmb();
6154
6155		if (std_prod_idx != dpr->rx_std_prod_idx)
6156			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6157				     dpr->rx_std_prod_idx);
6158
6159		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6160			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6161				     dpr->rx_jmb_prod_idx);
6162
6163		mmiowb();
6164
6165		if (err)
6166			tw32_f(HOSTCC_MODE, tp->coal_now);
6167	}
6168
6169	return work_done;
6170}
6171
6172static inline void tg3_reset_task_schedule(struct tg3 *tp)
6173{
6174	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6175		schedule_work(&tp->reset_task);
6176}
6177
6178static inline void tg3_reset_task_cancel(struct tg3 *tp)
6179{
6180	cancel_work_sync(&tp->reset_task);
6181	tg3_flag_clear(tp, RESET_TASK_PENDING);
6182	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6183}
6184
6185static int tg3_poll_msix(struct napi_struct *napi, int budget)
6186{
6187	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6188	struct tg3 *tp = tnapi->tp;
6189	int work_done = 0;
6190	struct tg3_hw_status *sblk = tnapi->hw_status;
6191
6192	while (1) {
6193		work_done = tg3_poll_work(tnapi, work_done, budget);
6194
6195		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6196			goto tx_recovery;
6197
6198		if (unlikely(work_done >= budget))
6199			break;
6200
6201		/* tp->last_tag is used in tg3_int_reenable() below
6202		 * to tell the hw how much work has been processed,
6203		 * so we must read it before checking for more work.
6204		 */
6205		tnapi->last_tag = sblk->status_tag;
6206		tnapi->last_irq_tag = tnapi->last_tag;
6207		rmb();
6208
6209		/* check for RX/TX work to do */
6210		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6211			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6212
6213			/* This test here is not race free, but will reduce
6214			 * the number of interrupts by looping again.
6215			 */
6216			if (tnapi == &tp->napi[1] && tp->rx_refill)
6217				continue;
6218
6219			napi_complete(napi);
6220			/* Reenable interrupts. */
6221			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6222
6223			/* This test here is synchronized by napi_schedule()
6224			 * and napi_complete() to close the race condition.
6225			 */
6226			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6227				tw32(HOSTCC_MODE, tp->coalesce_mode |
6228						  HOSTCC_MODE_ENABLE |
6229						  tnapi->coal_now);
6230			}
6231			mmiowb();
6232			break;
6233		}
6234	}
6235
6236	return work_done;
6237
6238tx_recovery:
6239	/* work_done is guaranteed to be less than budget. */
6240	napi_complete(napi);
6241	tg3_reset_task_schedule(tp);
6242	return work_done;
6243}
6244
6245static void tg3_process_error(struct tg3 *tp)
6246{
6247	u32 val;
6248	bool real_error = false;
6249
6250	if (tg3_flag(tp, ERROR_PROCESSED))
6251		return;
6252
6253	/* Check Flow Attention register */
6254	val = tr32(HOSTCC_FLOW_ATTN);
6255	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6256		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6257		real_error = true;
6258	}
6259
6260	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6261		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6262		real_error = true;
6263	}
6264
6265	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6266		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6267		real_error = true;
6268	}
6269
6270	if (!real_error)
6271		return;
6272
6273	tg3_dump_state(tp);
6274
6275	tg3_flag_set(tp, ERROR_PROCESSED);
6276	tg3_reset_task_schedule(tp);
6277}
6278
6279static int tg3_poll(struct napi_struct *napi, int budget)
6280{
6281	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6282	struct tg3 *tp = tnapi->tp;
6283	int work_done = 0;
6284	struct tg3_hw_status *sblk = tnapi->hw_status;
6285
6286	while (1) {
6287		if (sblk->status & SD_STATUS_ERROR)
6288			tg3_process_error(tp);
6289
6290		tg3_poll_link(tp);
6291
6292		work_done = tg3_poll_work(tnapi, work_done, budget);
6293
6294		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6295			goto tx_recovery;
6296
6297		if (unlikely(work_done >= budget))
6298			break;
6299
6300		if (tg3_flag(tp, TAGGED_STATUS)) {
6301			/* tp->last_tag is used in tg3_int_reenable() below
6302			 * to tell the hw how much work has been processed,
6303			 * so we must read it before checking for more work.
6304			 */
6305			tnapi->last_tag = sblk->status_tag;
6306			tnapi->last_irq_tag = tnapi->last_tag;
6307			rmb();
6308		} else
6309			sblk->status &= ~SD_STATUS_UPDATED;
6310
6311		if (likely(!tg3_has_work(tnapi))) {
6312			napi_complete(napi);
6313			tg3_int_reenable(tnapi);
6314			break;
6315		}
6316	}
6317
6318	return work_done;
6319
6320tx_recovery:
6321	/* work_done is guaranteed to be less than budget. */
6322	napi_complete(napi);
6323	tg3_reset_task_schedule(tp);
6324	return work_done;
6325}
6326
6327static void tg3_napi_disable(struct tg3 *tp)
6328{
6329	int i;
6330
6331	for (i = tp->irq_cnt - 1; i >= 0; i--)
6332		napi_disable(&tp->napi[i].napi);
6333}
6334
6335static void tg3_napi_enable(struct tg3 *tp)
6336{
6337	int i;
6338
6339	for (i = 0; i < tp->irq_cnt; i++)
6340		napi_enable(&tp->napi[i].napi);
6341}
6342
6343static void tg3_napi_init(struct tg3 *tp)
6344{
6345	int i;
6346
6347	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6348	for (i = 1; i < tp->irq_cnt; i++)
6349		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6350}
6351
6352static void tg3_napi_fini(struct tg3 *tp)
6353{
6354	int i;
6355
6356	for (i = 0; i < tp->irq_cnt; i++)
6357		netif_napi_del(&tp->napi[i].napi);
6358}
6359
6360static inline void tg3_netif_stop(struct tg3 *tp)
6361{
6362	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6363	tg3_napi_disable(tp);
6364	netif_tx_disable(tp->dev);
6365}
6366
6367static inline void tg3_netif_start(struct tg3 *tp)
6368{
6369	/* NOTE: unconditional netif_tx_wake_all_queues is only
6370	 * appropriate so long as all callers are assured to
6371	 * have free tx slots (such as after tg3_init_hw)
6372	 */
6373	netif_tx_wake_all_queues(tp->dev);
6374
6375	tg3_napi_enable(tp);
6376	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6377	tg3_enable_ints(tp);
6378}
6379
6380static void tg3_irq_quiesce(struct tg3 *tp)
6381{
6382	int i;
6383
6384	BUG_ON(tp->irq_sync);
6385
6386	tp->irq_sync = 1;
6387	smp_mb();
6388
6389	for (i = 0; i < tp->irq_cnt; i++)
6390		synchronize_irq(tp->napi[i].irq_vec);
6391}
6392
6393/* Fully shutdown all tg3 driver activity elsewhere in the system.
6394 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6395 * with as well.  Most of the time, this is not necessary except when
6396 * shutting down the device.
6397 */
6398static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6399{
6400	spin_lock_bh(&tp->lock);
6401	if (irq_sync)
6402		tg3_irq_quiesce(tp);
6403}
6404
6405static inline void tg3_full_unlock(struct tg3 *tp)
6406{
6407	spin_unlock_bh(&tp->lock);
6408}
6409
6410/* One-shot MSI handler - Chip automatically disables interrupt
6411 * after sending MSI so driver doesn't have to do it.
6412 */
6413static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6414{
6415	struct tg3_napi *tnapi = dev_id;
6416	struct tg3 *tp = tnapi->tp;
6417
6418	prefetch(tnapi->hw_status);
6419	if (tnapi->rx_rcb)
6420		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6421
6422	if (likely(!tg3_irq_sync(tp)))
6423		napi_schedule(&tnapi->napi);
6424
6425	return IRQ_HANDLED;
6426}
6427
6428/* MSI ISR - No need to check for interrupt sharing and no need to
6429 * flush status block and interrupt mailbox. PCI ordering rules
6430 * guarantee that MSI will arrive after the status block.
6431 */
6432static irqreturn_t tg3_msi(int irq, void *dev_id)
6433{
6434	struct tg3_napi *tnapi = dev_id;
6435	struct tg3 *tp = tnapi->tp;
6436
6437	prefetch(tnapi->hw_status);
6438	if (tnapi->rx_rcb)
6439		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6440	/*
6441	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6442	 * chip-internal interrupt pending events.
6443	 * Writing non-zero to intr-mbox-0 additional tells the
6444	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6445	 * event coalescing.
6446	 */
6447	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6448	if (likely(!tg3_irq_sync(tp)))
6449		napi_schedule(&tnapi->napi);
6450
6451	return IRQ_RETVAL(1);
6452}
6453
6454static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6455{
6456	struct tg3_napi *tnapi = dev_id;
6457	struct tg3 *tp = tnapi->tp;
6458	struct tg3_hw_status *sblk = tnapi->hw_status;
6459	unsigned int handled = 1;
6460
6461	/* In INTx mode, it is possible for the interrupt to arrive at
6462	 * the CPU before the status block posted prior to the interrupt.
6463	 * Reading the PCI State register will confirm whether the
6464	 * interrupt is ours and will flush the status block.
6465	 */
6466	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6467		if (tg3_flag(tp, CHIP_RESETTING) ||
6468		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6469			handled = 0;
6470			goto out;
6471		}
6472	}
6473
6474	/*
6475	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6476	 * chip-internal interrupt pending events.
6477	 * Writing non-zero to intr-mbox-0 additional tells the
6478	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6479	 * event coalescing.
6480	 *
6481	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6482	 * spurious interrupts.  The flush impacts performance but
6483	 * excessive spurious interrupts can be worse in some cases.
6484	 */
6485	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6486	if (tg3_irq_sync(tp))
6487		goto out;
6488	sblk->status &= ~SD_STATUS_UPDATED;
6489	if (likely(tg3_has_work(tnapi))) {
6490		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6491		napi_schedule(&tnapi->napi);
6492	} else {
6493		/* No work, shared interrupt perhaps?  re-enable
6494		 * interrupts, and flush that PCI write
6495		 */
6496		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6497			       0x00000000);
6498	}
6499out:
6500	return IRQ_RETVAL(handled);
6501}
6502
6503static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6504{
6505	struct tg3_napi *tnapi = dev_id;
6506	struct tg3 *tp = tnapi->tp;
6507	struct tg3_hw_status *sblk = tnapi->hw_status;
6508	unsigned int handled = 1;
6509
6510	/* In INTx mode, it is possible for the interrupt to arrive at
6511	 * the CPU before the status block posted prior to the interrupt.
6512	 * Reading the PCI State register will confirm whether the
6513	 * interrupt is ours and will flush the status block.
6514	 */
6515	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6516		if (tg3_flag(tp, CHIP_RESETTING) ||
6517		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6518			handled = 0;
6519			goto out;
6520		}
6521	}
6522
6523	/*
6524	 * writing any value to intr-mbox-0 clears PCI INTA# and
6525	 * chip-internal interrupt pending events.
6526	 * writing non-zero to intr-mbox-0 additional tells the
6527	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6528	 * event coalescing.
6529	 *
6530	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6531	 * spurious interrupts.  The flush impacts performance but
6532	 * excessive spurious interrupts can be worse in some cases.
6533	 */
6534	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6535
6536	/*
6537	 * In a shared interrupt configuration, sometimes other devices'
6538	 * interrupts will scream.  We record the current status tag here
6539	 * so that the above check can report that the screaming interrupts
6540	 * are unhandled.  Eventually they will be silenced.
6541	 */
6542	tnapi->last_irq_tag = sblk->status_tag;
6543
6544	if (tg3_irq_sync(tp))
6545		goto out;
6546
6547	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6548
6549	napi_schedule(&tnapi->napi);
6550
6551out:
6552	return IRQ_RETVAL(handled);
6553}
6554
6555/* ISR for interrupt test */
6556static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6557{
6558	struct tg3_napi *tnapi = dev_id;
6559	struct tg3 *tp = tnapi->tp;
6560	struct tg3_hw_status *sblk = tnapi->hw_status;
6561
6562	if ((sblk->status & SD_STATUS_UPDATED) ||
6563	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6564		tg3_disable_ints(tp);
6565		return IRQ_RETVAL(1);
6566	}
6567	return IRQ_RETVAL(0);
6568}
6569
6570#ifdef CONFIG_NET_POLL_CONTROLLER
6571static void tg3_poll_controller(struct net_device *dev)
6572{
6573	int i;
6574	struct tg3 *tp = netdev_priv(dev);
6575
6576	for (i = 0; i < tp->irq_cnt; i++)
6577		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6578}
6579#endif
6580
6581static void tg3_tx_timeout(struct net_device *dev)
6582{
6583	struct tg3 *tp = netdev_priv(dev);
6584
6585	if (netif_msg_tx_err(tp)) {
6586		netdev_err(dev, "transmit timed out, resetting\n");
6587		tg3_dump_state(tp);
6588	}
6589
6590	tg3_reset_task_schedule(tp);
6591}
6592
6593/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6594static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6595{
6596	u32 base = (u32) mapping & 0xffffffff;
6597
6598	return (base > 0xffffdcc0) && (base + len + 8 < base);
6599}
6600
6601/* Test for DMA addresses > 40-bit */
6602static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6603					  int len)
6604{
6605#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6606	if (tg3_flag(tp, 40BIT_DMA_BUG))
6607		return ((u64) mapping + len) > DMA_BIT_MASK(40);
6608	return 0;
6609#else
6610	return 0;
6611#endif
6612}
6613
6614static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6615				 dma_addr_t mapping, u32 len, u32 flags,
6616				 u32 mss, u32 vlan)
6617{
6618	txbd->addr_hi = ((u64) mapping >> 32);
6619	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6620	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6621	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6622}
6623
6624static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6625			    dma_addr_t map, u32 len, u32 flags,
6626			    u32 mss, u32 vlan)
6627{
6628	struct tg3 *tp = tnapi->tp;
6629	bool hwbug = false;
6630
6631	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6632		hwbug = true;
6633
6634	if (tg3_4g_overflow_test(map, len))
6635		hwbug = true;
6636
6637	if (tg3_40bit_overflow_test(tp, map, len))
6638		hwbug = true;
6639
6640	if (tp->dma_limit) {
6641		u32 prvidx = *entry;
6642		u32 tmp_flag = flags & ~TXD_FLAG_END;
6643		while (len > tp->dma_limit && *budget) {
6644			u32 frag_len = tp->dma_limit;
6645			len -= tp->dma_limit;
6646
6647			/* Avoid the 8byte DMA problem */
6648			if (len <= 8) {
6649				len += tp->dma_limit / 2;
6650				frag_len = tp->dma_limit / 2;
6651			}
6652
6653			tnapi->tx_buffers[*entry].fragmented = true;
6654
6655			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6656				      frag_len, tmp_flag, mss, vlan);
6657			*budget -= 1;
6658			prvidx = *entry;
6659			*entry = NEXT_TX(*entry);
6660
6661			map += frag_len;
6662		}
6663
6664		if (len) {
6665			if (*budget) {
6666				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6667					      len, flags, mss, vlan);
6668				*budget -= 1;
6669				*entry = NEXT_TX(*entry);
6670			} else {
6671				hwbug = true;
6672				tnapi->tx_buffers[prvidx].fragmented = false;
6673			}
6674		}
6675	} else {
6676		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6677			      len, flags, mss, vlan);
6678		*entry = NEXT_TX(*entry);
6679	}
6680
6681	return hwbug;
6682}
6683
6684static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6685{
6686	int i;
6687	struct sk_buff *skb;
6688	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6689
6690	skb = txb->skb;
6691	txb->skb = NULL;
6692
6693	pci_unmap_single(tnapi->tp->pdev,
6694			 dma_unmap_addr(txb, mapping),
6695			 skb_headlen(skb),
6696			 PCI_DMA_TODEVICE);
6697
6698	while (txb->fragmented) {
6699		txb->fragmented = false;
6700		entry = NEXT_TX(entry);
6701		txb = &tnapi->tx_buffers[entry];
6702	}
6703
6704	for (i = 0; i <= last; i++) {
6705		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6706
6707		entry = NEXT_TX(entry);
6708		txb = &tnapi->tx_buffers[entry];
6709
6710		pci_unmap_page(tnapi->tp->pdev,
6711			       dma_unmap_addr(txb, mapping),
6712			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6713
6714		while (txb->fragmented) {
6715			txb->fragmented = false;
6716			entry = NEXT_TX(entry);
6717			txb = &tnapi->tx_buffers[entry];
6718		}
6719	}
6720}
6721
6722/* Workaround 4GB and 40-bit hardware DMA bugs. */
6723static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6724				       struct sk_buff **pskb,
6725				       u32 *entry, u32 *budget,
6726				       u32 base_flags, u32 mss, u32 vlan)
6727{
6728	struct tg3 *tp = tnapi->tp;
6729	struct sk_buff *new_skb, *skb = *pskb;
6730	dma_addr_t new_addr = 0;
6731	int ret = 0;
6732
6733	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6734		new_skb = skb_copy(skb, GFP_ATOMIC);
6735	else {
6736		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6737
6738		new_skb = skb_copy_expand(skb,
6739					  skb_headroom(skb) + more_headroom,
6740					  skb_tailroom(skb), GFP_ATOMIC);
6741	}
6742
6743	if (!new_skb) {
6744		ret = -1;
6745	} else {
6746		/* New SKB is guaranteed to be linear. */
6747		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6748					  PCI_DMA_TODEVICE);
6749		/* Make sure the mapping succeeded */
6750		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6751			dev_kfree_skb(new_skb);
6752			ret = -1;
6753		} else {
6754			u32 save_entry = *entry;
6755
6756			base_flags |= TXD_FLAG_END;
6757
6758			tnapi->tx_buffers[*entry].skb = new_skb;
6759			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6760					   mapping, new_addr);
6761
6762			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6763					    new_skb->len, base_flags,
6764					    mss, vlan)) {
6765				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6766				dev_kfree_skb(new_skb);
6767				ret = -1;
6768			}
6769		}
6770	}
6771
6772	dev_kfree_skb(skb);
6773	*pskb = new_skb;
6774	return ret;
6775}
6776
6777static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6778
6779/* Use GSO to workaround a rare TSO bug that may be triggered when the
6780 * TSO header is greater than 80 bytes.
6781 */
6782static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6783{
6784	struct sk_buff *segs, *nskb;
6785	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6786
6787	/* Estimate the number of fragments in the worst case */
6788	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6789		netif_stop_queue(tp->dev);
6790
6791		/* netif_tx_stop_queue() must be done before checking
6792		 * checking tx index in tg3_tx_avail() below, because in
6793		 * tg3_tx(), we update tx index before checking for
6794		 * netif_tx_queue_stopped().
6795		 */
6796		smp_mb();
6797		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6798			return NETDEV_TX_BUSY;
6799
6800		netif_wake_queue(tp->dev);
6801	}
6802
6803	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6804	if (IS_ERR(segs))
6805		goto tg3_tso_bug_end;
6806
6807	do {
6808		nskb = segs;
6809		segs = segs->next;
6810		nskb->next = NULL;
6811		tg3_start_xmit(nskb, tp->dev);
6812	} while (segs);
6813
6814tg3_tso_bug_end:
6815	dev_kfree_skb(skb);
6816
6817	return NETDEV_TX_OK;
6818}
6819
6820/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6821 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6822 */
6823static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6824{
6825	struct tg3 *tp = netdev_priv(dev);
6826	u32 len, entry, base_flags, mss, vlan = 0;
6827	u32 budget;
6828	int i = -1, would_hit_hwbug;
6829	dma_addr_t mapping;
6830	struct tg3_napi *tnapi;
6831	struct netdev_queue *txq;
6832	unsigned int last;
6833
6834	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6835	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6836	if (tg3_flag(tp, ENABLE_TSS))
6837		tnapi++;
6838
6839	budget = tg3_tx_avail(tnapi);
6840
6841	/* We are running in BH disabled context with netif_tx_lock
6842	 * and TX reclaim runs via tp->napi.poll inside of a software
6843	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
6844	 * no IRQ context deadlocks to worry about either.  Rejoice!
6845	 */
6846	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6847		if (!netif_tx_queue_stopped(txq)) {
6848			netif_tx_stop_queue(txq);
6849
6850			/* This is a hard error, log it. */
6851			netdev_err(dev,
6852				   "BUG! Tx Ring full when queue awake!\n");
6853		}
6854		return NETDEV_TX_BUSY;
6855	}
6856
6857	entry = tnapi->tx_prod;
6858	base_flags = 0;
6859	if (skb->ip_summed == CHECKSUM_PARTIAL)
6860		base_flags |= TXD_FLAG_TCPUDP_CSUM;
6861
6862	mss = skb_shinfo(skb)->gso_size;
6863	if (mss) {
6864		struct iphdr *iph;
6865		u32 tcp_opt_len, hdr_len;
6866
6867		if (skb_header_cloned(skb) &&
6868		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6869			goto drop;
6870
6871		iph = ip_hdr(skb);
6872		tcp_opt_len = tcp_optlen(skb);
6873
6874		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6875
6876		if (!skb_is_gso_v6(skb)) {
6877			iph->check = 0;
6878			iph->tot_len = htons(mss + hdr_len);
6879		}
6880
6881		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6882		    tg3_flag(tp, TSO_BUG))
6883			return tg3_tso_bug(tp, skb);
6884
6885		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6886			       TXD_FLAG_CPU_POST_DMA);
6887
6888		if (tg3_flag(tp, HW_TSO_1) ||
6889		    tg3_flag(tp, HW_TSO_2) ||
6890		    tg3_flag(tp, HW_TSO_3)) {
6891			tcp_hdr(skb)->check = 0;
6892			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6893		} else
6894			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6895								 iph->daddr, 0,
6896								 IPPROTO_TCP,
6897								 0);
6898
6899		if (tg3_flag(tp, HW_TSO_3)) {
6900			mss |= (hdr_len & 0xc) << 12;
6901			if (hdr_len & 0x10)
6902				base_flags |= 0x00000010;
6903			base_flags |= (hdr_len & 0x3e0) << 5;
6904		} else if (tg3_flag(tp, HW_TSO_2))
6905			mss |= hdr_len << 9;
6906		else if (tg3_flag(tp, HW_TSO_1) ||
6907			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6908			if (tcp_opt_len || iph->ihl > 5) {
6909				int tsflags;
6910
6911				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6912				mss |= (tsflags << 11);
6913			}
6914		} else {
6915			if (tcp_opt_len || iph->ihl > 5) {
6916				int tsflags;
6917
6918				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6919				base_flags |= tsflags << 12;
6920			}
6921		}
6922	}
6923
6924	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6925	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
6926		base_flags |= TXD_FLAG_JMB_PKT;
6927
6928	if (vlan_tx_tag_present(skb)) {
6929		base_flags |= TXD_FLAG_VLAN;
6930		vlan = vlan_tx_tag_get(skb);
6931	}
6932
6933	len = skb_headlen(skb);
6934
6935	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6936	if (pci_dma_mapping_error(tp->pdev, mapping))
6937		goto drop;
6938
6939
6940	tnapi->tx_buffers[entry].skb = skb;
6941	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6942
6943	would_hit_hwbug = 0;
6944
6945	if (tg3_flag(tp, 5701_DMA_BUG))
6946		would_hit_hwbug = 1;
6947
6948	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6949			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6950			    mss, vlan)) {
6951		would_hit_hwbug = 1;
6952	} else if (skb_shinfo(skb)->nr_frags > 0) {
6953		u32 tmp_mss = mss;
6954
6955		if (!tg3_flag(tp, HW_TSO_1) &&
6956		    !tg3_flag(tp, HW_TSO_2) &&
6957		    !tg3_flag(tp, HW_TSO_3))
6958			tmp_mss = 0;
6959
6960		/* Now loop through additional data
6961		 * fragments, and queue them.
6962		 */
6963		last = skb_shinfo(skb)->nr_frags - 1;
6964		for (i = 0; i <= last; i++) {
6965			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6966
6967			len = skb_frag_size(frag);
6968			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6969						   len, DMA_TO_DEVICE);
6970
6971			tnapi->tx_buffers[entry].skb = NULL;
6972			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6973					   mapping);
6974			if (dma_mapping_error(&tp->pdev->dev, mapping))
6975				goto dma_error;
6976
6977			if (!budget ||
6978			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6979					    len, base_flags |
6980					    ((i == last) ? TXD_FLAG_END : 0),
6981					    tmp_mss, vlan)) {
6982				would_hit_hwbug = 1;
6983				break;
6984			}
6985		}
6986	}
6987
6988	if (would_hit_hwbug) {
6989		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6990
6991		/* If the workaround fails due to memory/mapping
6992		 * failure, silently drop this packet.
6993		 */
6994		entry = tnapi->tx_prod;
6995		budget = tg3_tx_avail(tnapi);
6996		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6997						base_flags, mss, vlan))
6998			goto drop_nofree;
6999	}
7000
7001	skb_tx_timestamp(skb);
7002	netdev_tx_sent_queue(txq, skb->len);
7003
7004	/* Sync BD data before updating mailbox */
7005	wmb();
7006
7007	/* Packets are ready, update Tx producer idx local and on card. */
7008	tw32_tx_mbox(tnapi->prodmbox, entry);
7009
7010	tnapi->tx_prod = entry;
7011	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7012		netif_tx_stop_queue(txq);
7013
7014		/* netif_tx_stop_queue() must be done before checking
7015		 * checking tx index in tg3_tx_avail() below, because in
7016		 * tg3_tx(), we update tx index before checking for
7017		 * netif_tx_queue_stopped().
7018		 */
7019		smp_mb();
7020		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7021			netif_tx_wake_queue(txq);
7022	}
7023
7024	mmiowb();
7025	return NETDEV_TX_OK;
7026
7027dma_error:
7028	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7029	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7030drop:
7031	dev_kfree_skb(skb);
7032drop_nofree:
7033	tp->tx_dropped++;
7034	return NETDEV_TX_OK;
7035}
7036
7037static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7038{
7039	if (enable) {
7040		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7041				  MAC_MODE_PORT_MODE_MASK);
7042
7043		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7044
7045		if (!tg3_flag(tp, 5705_PLUS))
7046			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7047
7048		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7049			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7050		else
7051			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7052	} else {
7053		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7054
7055		if (tg3_flag(tp, 5705_PLUS) ||
7056		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7057		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7058			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7059	}
7060
7061	tw32(MAC_MODE, tp->mac_mode);
7062	udelay(40);
7063}
7064
7065static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7066{
7067	u32 val, bmcr, mac_mode, ptest = 0;
7068
7069	tg3_phy_toggle_apd(tp, false);
7070	tg3_phy_toggle_automdix(tp, 0);
7071
7072	if (extlpbk && tg3_phy_set_extloopbk(tp))
7073		return -EIO;
7074
7075	bmcr = BMCR_FULLDPLX;
7076	switch (speed) {
7077	case SPEED_10:
7078		break;
7079	case SPEED_100:
7080		bmcr |= BMCR_SPEED100;
7081		break;
7082	case SPEED_1000:
7083	default:
7084		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7085			speed = SPEED_100;
7086			bmcr |= BMCR_SPEED100;
7087		} else {
7088			speed = SPEED_1000;
7089			bmcr |= BMCR_SPEED1000;
7090		}
7091	}
7092
7093	if (extlpbk) {
7094		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7095			tg3_readphy(tp, MII_CTRL1000, &val);
7096			val |= CTL1000_AS_MASTER |
7097			       CTL1000_ENABLE_MASTER;
7098			tg3_writephy(tp, MII_CTRL1000, val);
7099		} else {
7100			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7101				MII_TG3_FET_PTEST_TRIM_2;
7102			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7103		}
7104	} else
7105		bmcr |= BMCR_LOOPBACK;
7106
7107	tg3_writephy(tp, MII_BMCR, bmcr);
7108
7109	/* The write needs to be flushed for the FETs */
7110	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7111		tg3_readphy(tp, MII_BMCR, &bmcr);
7112
7113	udelay(40);
7114
7115	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7116	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7117		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7118			     MII_TG3_FET_PTEST_FRC_TX_LINK |
7119			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
7120
7121		/* The write needs to be flushed for the AC131 */
7122		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7123	}
7124
7125	/* Reset to prevent losing 1st rx packet intermittently */
7126	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7127	    tg3_flag(tp, 5780_CLASS)) {
7128		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7129		udelay(10);
7130		tw32_f(MAC_RX_MODE, tp->rx_mode);
7131	}
7132
7133	mac_mode = tp->mac_mode &
7134		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7135	if (speed == SPEED_1000)
7136		mac_mode |= MAC_MODE_PORT_MODE_GMII;
7137	else
7138		mac_mode |= MAC_MODE_PORT_MODE_MII;
7139
7140	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7141		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7142
7143		if (masked_phy_id == TG3_PHY_ID_BCM5401)
7144			mac_mode &= ~MAC_MODE_LINK_POLARITY;
7145		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7146			mac_mode |= MAC_MODE_LINK_POLARITY;
7147
7148		tg3_writephy(tp, MII_TG3_EXT_CTRL,
7149			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7150	}
7151
7152	tw32(MAC_MODE, mac_mode);
7153	udelay(40);
7154
7155	return 0;
7156}
7157
7158static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7159{
7160	struct tg3 *tp = netdev_priv(dev);
7161
7162	if (features & NETIF_F_LOOPBACK) {
7163		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7164			return;
7165
7166		spin_lock_bh(&tp->lock);
7167		tg3_mac_loopback(tp, true);
7168		netif_carrier_on(tp->dev);
7169		spin_unlock_bh(&tp->lock);
7170		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7171	} else {
7172		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7173			return;
7174
7175		spin_lock_bh(&tp->lock);
7176		tg3_mac_loopback(tp, false);
7177		/* Force link status check */
7178		tg3_setup_phy(tp, 1);
7179		spin_unlock_bh(&tp->lock);
7180		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7181	}
7182}
7183
7184static netdev_features_t tg3_fix_features(struct net_device *dev,
7185	netdev_features_t features)
7186{
7187	struct tg3 *tp = netdev_priv(dev);
7188
7189	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7190		features &= ~NETIF_F_ALL_TSO;
7191
7192	return features;
7193}
7194
7195static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7196{
7197	netdev_features_t changed = dev->features ^ features;
7198
7199	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7200		tg3_set_loopback(dev, features);
7201
7202	return 0;
7203}
7204
7205static void tg3_rx_prodring_free(struct tg3 *tp,
7206				 struct tg3_rx_prodring_set *tpr)
7207{
7208	int i;
7209
7210	if (tpr != &tp->napi[0].prodring) {
7211		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7212		     i = (i + 1) & tp->rx_std_ring_mask)
7213			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7214					tp->rx_pkt_map_sz);
7215
7216		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7217			for (i = tpr->rx_jmb_cons_idx;
7218			     i != tpr->rx_jmb_prod_idx;
7219			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7220				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7221						TG3_RX_JMB_MAP_SZ);
7222			}
7223		}
7224
7225		return;
7226	}
7227
7228	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7229		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7230				tp->rx_pkt_map_sz);
7231
7232	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7233		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7234			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7235					TG3_RX_JMB_MAP_SZ);
7236	}
7237}
7238
7239/* Initialize rx rings for packet processing.
7240 *
7241 * The chip has been shut down and the driver detached from
7242 * the networking, so no interrupts or new tx packets will
7243 * end up in the driver.  tp->{tx,}lock are held and thus
7244 * we may not sleep.
7245 */
7246static int tg3_rx_prodring_alloc(struct tg3 *tp,
7247				 struct tg3_rx_prodring_set *tpr)
7248{
7249	u32 i, rx_pkt_dma_sz;
7250
7251	tpr->rx_std_cons_idx = 0;
7252	tpr->rx_std_prod_idx = 0;
7253	tpr->rx_jmb_cons_idx = 0;
7254	tpr->rx_jmb_prod_idx = 0;
7255
7256	if (tpr != &tp->napi[0].prodring) {
7257		memset(&tpr->rx_std_buffers[0], 0,
7258		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7259		if (tpr->rx_jmb_buffers)
7260			memset(&tpr->rx_jmb_buffers[0], 0,
7261			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7262		goto done;
7263	}
7264
7265	/* Zero out all descriptors. */
7266	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7267
7268	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7269	if (tg3_flag(tp, 5780_CLASS) &&
7270	    tp->dev->mtu > ETH_DATA_LEN)
7271		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7272	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7273
7274	/* Initialize invariants of the rings, we only set this
7275	 * stuff once.  This works because the card does not
7276	 * write into the rx buffer posting rings.
7277	 */
7278	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7279		struct tg3_rx_buffer_desc *rxd;
7280
7281		rxd = &tpr->rx_std[i];
7282		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7283		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7284		rxd->opaque = (RXD_OPAQUE_RING_STD |
7285			       (i << RXD_OPAQUE_INDEX_SHIFT));
7286	}
7287
7288	/* Now allocate fresh SKBs for each rx ring. */
7289	for (i = 0; i < tp->rx_pending; i++) {
7290		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7291			netdev_warn(tp->dev,
7292				    "Using a smaller RX standard ring. Only "
7293				    "%d out of %d buffers were allocated "
7294				    "successfully\n", i, tp->rx_pending);
7295			if (i == 0)
7296				goto initfail;
7297			tp->rx_pending = i;
7298			break;
7299		}
7300	}
7301
7302	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7303		goto done;
7304
7305	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7306
7307	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7308		goto done;
7309
7310	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7311		struct tg3_rx_buffer_desc *rxd;
7312
7313		rxd = &tpr->rx_jmb[i].std;
7314		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7315		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7316				  RXD_FLAG_JUMBO;
7317		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7318		       (i << RXD_OPAQUE_INDEX_SHIFT));
7319	}
7320
7321	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7322		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7323			netdev_warn(tp->dev,
7324				    "Using a smaller RX jumbo ring. Only %d "
7325				    "out of %d buffers were allocated "
7326				    "successfully\n", i, tp->rx_jumbo_pending);
7327			if (i == 0)
7328				goto initfail;
7329			tp->rx_jumbo_pending = i;
7330			break;
7331		}
7332	}
7333
7334done:
7335	return 0;
7336
7337initfail:
7338	tg3_rx_prodring_free(tp, tpr);
7339	return -ENOMEM;
7340}
7341
7342static void tg3_rx_prodring_fini(struct tg3 *tp,
7343				 struct tg3_rx_prodring_set *tpr)
7344{
7345	kfree(tpr->rx_std_buffers);
7346	tpr->rx_std_buffers = NULL;
7347	kfree(tpr->rx_jmb_buffers);
7348	tpr->rx_jmb_buffers = NULL;
7349	if (tpr->rx_std) {
7350		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7351				  tpr->rx_std, tpr->rx_std_mapping);
7352		tpr->rx_std = NULL;
7353	}
7354	if (tpr->rx_jmb) {
7355		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7356				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7357		tpr->rx_jmb = NULL;
7358	}
7359}
7360
7361static int tg3_rx_prodring_init(struct tg3 *tp,
7362				struct tg3_rx_prodring_set *tpr)
7363{
7364	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7365				      GFP_KERNEL);
7366	if (!tpr->rx_std_buffers)
7367		return -ENOMEM;
7368
7369	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7370					 TG3_RX_STD_RING_BYTES(tp),
7371					 &tpr->rx_std_mapping,
7372					 GFP_KERNEL);
7373	if (!tpr->rx_std)
7374		goto err_out;
7375
7376	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7377		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7378					      GFP_KERNEL);
7379		if (!tpr->rx_jmb_buffers)
7380			goto err_out;
7381
7382		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7383						 TG3_RX_JMB_RING_BYTES(tp),
7384						 &tpr->rx_jmb_mapping,
7385						 GFP_KERNEL);
7386		if (!tpr->rx_jmb)
7387			goto err_out;
7388	}
7389
7390	return 0;
7391
7392err_out:
7393	tg3_rx_prodring_fini(tp, tpr);
7394	return -ENOMEM;
7395}
7396
7397/* Free up pending packets in all rx/tx rings.
7398 *
7399 * The chip has been shut down and the driver detached from
7400 * the networking, so no interrupts or new tx packets will
7401 * end up in the driver.  tp->{tx,}lock is not held and we are not
7402 * in an interrupt context and thus may sleep.
7403 */
7404static void tg3_free_rings(struct tg3 *tp)
7405{
7406	int i, j;
7407
7408	for (j = 0; j < tp->irq_cnt; j++) {
7409		struct tg3_napi *tnapi = &tp->napi[j];
7410
7411		tg3_rx_prodring_free(tp, &tnapi->prodring);
7412
7413		if (!tnapi->tx_buffers)
7414			continue;
7415
7416		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7417			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7418
7419			if (!skb)
7420				continue;
7421
7422			tg3_tx_skb_unmap(tnapi, i,
7423					 skb_shinfo(skb)->nr_frags - 1);
7424
7425			dev_kfree_skb_any(skb);
7426		}
7427		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7428	}
7429}
7430
7431/* Initialize tx/rx rings for packet processing.
7432 *
7433 * The chip has been shut down and the driver detached from
7434 * the networking, so no interrupts or new tx packets will
7435 * end up in the driver.  tp->{tx,}lock are held and thus
7436 * we may not sleep.
7437 */
7438static int tg3_init_rings(struct tg3 *tp)
7439{
7440	int i;
7441
7442	/* Free up all the SKBs. */
7443	tg3_free_rings(tp);
7444
7445	for (i = 0; i < tp->irq_cnt; i++) {
7446		struct tg3_napi *tnapi = &tp->napi[i];
7447
7448		tnapi->last_tag = 0;
7449		tnapi->last_irq_tag = 0;
7450		tnapi->hw_status->status = 0;
7451		tnapi->hw_status->status_tag = 0;
7452		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7453
7454		tnapi->tx_prod = 0;
7455		tnapi->tx_cons = 0;
7456		if (tnapi->tx_ring)
7457			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7458
7459		tnapi->rx_rcb_ptr = 0;
7460		if (tnapi->rx_rcb)
7461			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7462
7463		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7464			tg3_free_rings(tp);
7465			return -ENOMEM;
7466		}
7467	}
7468
7469	return 0;
7470}
7471
7472/*
7473 * Must not be invoked with interrupt sources disabled and
7474 * the hardware shutdown down.
7475 */
7476static void tg3_free_consistent(struct tg3 *tp)
7477{
7478	int i;
7479
7480	for (i = 0; i < tp->irq_cnt; i++) {
7481		struct tg3_napi *tnapi = &tp->napi[i];
7482
7483		if (tnapi->tx_ring) {
7484			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7485				tnapi->tx_ring, tnapi->tx_desc_mapping);
7486			tnapi->tx_ring = NULL;
7487		}
7488
7489		kfree(tnapi->tx_buffers);
7490		tnapi->tx_buffers = NULL;
7491
7492		if (tnapi->rx_rcb) {
7493			dma_free_coherent(&tp->pdev->dev,
7494					  TG3_RX_RCB_RING_BYTES(tp),
7495					  tnapi->rx_rcb,
7496					  tnapi->rx_rcb_mapping);
7497			tnapi->rx_rcb = NULL;
7498		}
7499
7500		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7501
7502		if (tnapi->hw_status) {
7503			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7504					  tnapi->hw_status,
7505					  tnapi->status_mapping);
7506			tnapi->hw_status = NULL;
7507		}
7508	}
7509
7510	if (tp->hw_stats) {
7511		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7512				  tp->hw_stats, tp->stats_mapping);
7513		tp->hw_stats = NULL;
7514	}
7515}
7516
7517/*
7518 * Must not be invoked with interrupt sources disabled and
7519 * the hardware shutdown down.  Can sleep.
7520 */
7521static int tg3_alloc_consistent(struct tg3 *tp)
7522{
7523	int i;
7524
7525	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7526					  sizeof(struct tg3_hw_stats),
7527					  &tp->stats_mapping,
7528					  GFP_KERNEL);
7529	if (!tp->hw_stats)
7530		goto err_out;
7531
7532	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7533
7534	for (i = 0; i < tp->irq_cnt; i++) {
7535		struct tg3_napi *tnapi = &tp->napi[i];
7536		struct tg3_hw_status *sblk;
7537
7538		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7539						      TG3_HW_STATUS_SIZE,
7540						      &tnapi->status_mapping,
7541						      GFP_KERNEL);
7542		if (!tnapi->hw_status)
7543			goto err_out;
7544
7545		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7546		sblk = tnapi->hw_status;
7547
7548		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7549			goto err_out;
7550
7551		/* If multivector TSS is enabled, vector 0 does not handle
7552		 * tx interrupts.  Don't allocate any resources for it.
7553		 */
7554		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7555		    (i && tg3_flag(tp, ENABLE_TSS))) {
7556			tnapi->tx_buffers = kzalloc(
7557					       sizeof(struct tg3_tx_ring_info) *
7558					       TG3_TX_RING_SIZE, GFP_KERNEL);
7559			if (!tnapi->tx_buffers)
7560				goto err_out;
7561
7562			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7563							    TG3_TX_RING_BYTES,
7564							&tnapi->tx_desc_mapping,
7565							    GFP_KERNEL);
7566			if (!tnapi->tx_ring)
7567				goto err_out;
7568		}
7569
7570		/*
7571		 * When RSS is enabled, the status block format changes
7572		 * slightly.  The "rx_jumbo_consumer", "reserved",
7573		 * and "rx_mini_consumer" members get mapped to the
7574		 * other three rx return ring producer indexes.
7575		 */
7576		switch (i) {
7577		default:
7578			if (tg3_flag(tp, ENABLE_RSS)) {
7579				tnapi->rx_rcb_prod_idx = NULL;
7580				break;
7581			}
7582			/* Fall through */
7583		case 1:
7584			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7585			break;
7586		case 2:
7587			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7588			break;
7589		case 3:
7590			tnapi->rx_rcb_prod_idx = &sblk->reserved;
7591			break;
7592		case 4:
7593			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7594			break;
7595		}
7596
7597		/*
7598		 * If multivector RSS is enabled, vector 0 does not handle
7599		 * rx or tx interrupts.  Don't allocate any resources for it.
7600		 */
7601		if (!i && tg3_flag(tp, ENABLE_RSS))
7602			continue;
7603
7604		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7605						   TG3_RX_RCB_RING_BYTES(tp),
7606						   &tnapi->rx_rcb_mapping,
7607						   GFP_KERNEL);
7608		if (!tnapi->rx_rcb)
7609			goto err_out;
7610
7611		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7612	}
7613
7614	return 0;
7615
7616err_out:
7617	tg3_free_consistent(tp);
7618	return -ENOMEM;
7619}
7620
7621#define MAX_WAIT_CNT 1000
7622
7623/* To stop a block, clear the enable bit and poll till it
7624 * clears.  tp->lock is held.
7625 */
7626static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7627{
7628	unsigned int i;
7629	u32 val;
7630
7631	if (tg3_flag(tp, 5705_PLUS)) {
7632		switch (ofs) {
7633		case RCVLSC_MODE:
7634		case DMAC_MODE:
7635		case MBFREE_MODE:
7636		case BUFMGR_MODE:
7637		case MEMARB_MODE:
7638			/* We can't enable/disable these bits of the
7639			 * 5705/5750, just say success.
7640			 */
7641			return 0;
7642
7643		default:
7644			break;
7645		}
7646	}
7647
7648	val = tr32(ofs);
7649	val &= ~enable_bit;
7650	tw32_f(ofs, val);
7651
7652	for (i = 0; i < MAX_WAIT_CNT; i++) {
7653		udelay(100);
7654		val = tr32(ofs);
7655		if ((val & enable_bit) == 0)
7656			break;
7657	}
7658
7659	if (i == MAX_WAIT_CNT && !silent) {
7660		dev_err(&tp->pdev->dev,
7661			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7662			ofs, enable_bit);
7663		return -ENODEV;
7664	}
7665
7666	return 0;
7667}
7668
7669/* tp->lock is held. */
7670static int tg3_abort_hw(struct tg3 *tp, int silent)
7671{
7672	int i, err;
7673
7674	tg3_disable_ints(tp);
7675
7676	tp->rx_mode &= ~RX_MODE_ENABLE;
7677	tw32_f(MAC_RX_MODE, tp->rx_mode);
7678	udelay(10);
7679
7680	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7681	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7682	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7683	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7684	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7685	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7686
7687	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7688	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7689	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7690	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7691	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7692	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7693	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7694
7695	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7696	tw32_f(MAC_MODE, tp->mac_mode);
7697	udelay(40);
7698
7699	tp->tx_mode &= ~TX_MODE_ENABLE;
7700	tw32_f(MAC_TX_MODE, tp->tx_mode);
7701
7702	for (i = 0; i < MAX_WAIT_CNT; i++) {
7703		udelay(100);
7704		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7705			break;
7706	}
7707	if (i >= MAX_WAIT_CNT) {
7708		dev_err(&tp->pdev->dev,
7709			"%s timed out, TX_MODE_ENABLE will not clear "
7710			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7711		err |= -ENODEV;
7712	}
7713
7714	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7715	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7716	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7717
7718	tw32(FTQ_RESET, 0xffffffff);
7719	tw32(FTQ_RESET, 0x00000000);
7720
7721	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7722	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7723
7724	for (i = 0; i < tp->irq_cnt; i++) {
7725		struct tg3_napi *tnapi = &tp->napi[i];
7726		if (tnapi->hw_status)
7727			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7728	}
7729
7730	return err;
7731}
7732
7733/* Save PCI command register before chip reset */
7734static void tg3_save_pci_state(struct tg3 *tp)
7735{
7736	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7737}
7738
7739/* Restore PCI state after chip reset */
7740static void tg3_restore_pci_state(struct tg3 *tp)
7741{
7742	u32 val;
7743
7744	/* Re-enable indirect register accesses. */
7745	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7746			       tp->misc_host_ctrl);
7747
7748	/* Set MAX PCI retry to zero. */
7749	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7750	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7751	    tg3_flag(tp, PCIX_MODE))
7752		val |= PCISTATE_RETRY_SAME_DMA;
7753	/* Allow reads and writes to the APE register and memory space. */
7754	if (tg3_flag(tp, ENABLE_APE))
7755		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7756		       PCISTATE_ALLOW_APE_SHMEM_WR |
7757		       PCISTATE_ALLOW_APE_PSPACE_WR;
7758	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7759
7760	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7761
7762	if (!tg3_flag(tp, PCI_EXPRESS)) {
7763		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7764				      tp->pci_cacheline_sz);
7765		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7766				      tp->pci_lat_timer);
7767	}
7768
7769	/* Make sure PCI-X relaxed ordering bit is clear. */
7770	if (tg3_flag(tp, PCIX_MODE)) {
7771		u16 pcix_cmd;
7772
7773		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7774				     &pcix_cmd);
7775		pcix_cmd &= ~PCI_X_CMD_ERO;
7776		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7777				      pcix_cmd);
7778	}
7779
7780	if (tg3_flag(tp, 5780_CLASS)) {
7781
7782		/* Chip reset on 5780 will reset MSI enable bit,
7783		 * so need to restore it.
7784		 */
7785		if (tg3_flag(tp, USING_MSI)) {
7786			u16 ctrl;
7787
7788			pci_read_config_word(tp->pdev,
7789					     tp->msi_cap + PCI_MSI_FLAGS,
7790					     &ctrl);
7791			pci_write_config_word(tp->pdev,
7792					      tp->msi_cap + PCI_MSI_FLAGS,
7793					      ctrl | PCI_MSI_FLAGS_ENABLE);
7794			val = tr32(MSGINT_MODE);
7795			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7796		}
7797	}
7798}
7799
7800/* tp->lock is held. */
7801static int tg3_chip_reset(struct tg3 *tp)
7802{
7803	u32 val;
7804	void (*write_op)(struct tg3 *, u32, u32);
7805	int i, err;
7806
7807	tg3_nvram_lock(tp);
7808
7809	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7810
7811	/* No matching tg3_nvram_unlock() after this because
7812	 * chip reset below will undo the nvram lock.
7813	 */
7814	tp->nvram_lock_cnt = 0;
7815
7816	/* GRC_MISC_CFG core clock reset will clear the memory
7817	 * enable bit in PCI register 4 and the MSI enable bit
7818	 * on some chips, so we save relevant registers here.
7819	 */
7820	tg3_save_pci_state(tp);
7821
7822	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7823	    tg3_flag(tp, 5755_PLUS))
7824		tw32(GRC_FASTBOOT_PC, 0);
7825
7826	/*
7827	 * We must avoid the readl() that normally takes place.
7828	 * It locks machines, causes machine checks, and other
7829	 * fun things.  So, temporarily disable the 5701
7830	 * hardware workaround, while we do the reset.
7831	 */
7832	write_op = tp->write32;
7833	if (write_op == tg3_write_flush_reg32)
7834		tp->write32 = tg3_write32;
7835
7836	/* Prevent the irq handler from reading or writing PCI registers
7837	 * during chip reset when the memory enable bit in the PCI command
7838	 * register may be cleared.  The chip does not generate interrupt
7839	 * at this time, but the irq handler may still be called due to irq
7840	 * sharing or irqpoll.
7841	 */
7842	tg3_flag_set(tp, CHIP_RESETTING);
7843	for (i = 0; i < tp->irq_cnt; i++) {
7844		struct tg3_napi *tnapi = &tp->napi[i];
7845		if (tnapi->hw_status) {
7846			tnapi->hw_status->status = 0;
7847			tnapi->hw_status->status_tag = 0;
7848		}
7849		tnapi->last_tag = 0;
7850		tnapi->last_irq_tag = 0;
7851	}
7852	smp_mb();
7853
7854	for (i = 0; i < tp->irq_cnt; i++)
7855		synchronize_irq(tp->napi[i].irq_vec);
7856
7857	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7858		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7859		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7860	}
7861
7862	/* do the reset */
7863	val = GRC_MISC_CFG_CORECLK_RESET;
7864
7865	if (tg3_flag(tp, PCI_EXPRESS)) {
7866		/* Force PCIe 1.0a mode */
7867		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7868		    !tg3_flag(tp, 57765_PLUS) &&
7869		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7870		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7871			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7872
7873		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7874			tw32(GRC_MISC_CFG, (1 << 29));
7875			val |= (1 << 29);
7876		}
7877	}
7878
7879	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7880		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7881		tw32(GRC_VCPU_EXT_CTRL,
7882		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7883	}
7884
7885	/* Manage gphy power for all CPMU absent PCIe devices. */
7886	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7887		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7888
7889	tw32(GRC_MISC_CFG, val);
7890
7891	/* restore 5701 hardware bug workaround write method */
7892	tp->write32 = write_op;
7893
7894	/* Unfortunately, we have to delay before the PCI read back.
7895	 * Some 575X chips even will not respond to a PCI cfg access
7896	 * when the reset command is given to the chip.
7897	 *
7898	 * How do these hardware designers expect things to work
7899	 * properly if the PCI write is posted for a long period
7900	 * of time?  It is always necessary to have some method by
7901	 * which a register read back can occur to push the write
7902	 * out which does the reset.
7903	 *
7904	 * For most tg3 variants the trick below was working.
7905	 * Ho hum...
7906	 */
7907	udelay(120);
7908
7909	/* Flush PCI posted writes.  The normal MMIO registers
7910	 * are inaccessible at this time so this is the only
7911	 * way to make this reliably (actually, this is no longer
7912	 * the case, see above).  I tried to use indirect
7913	 * register read/write but this upset some 5701 variants.
7914	 */
7915	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7916
7917	udelay(120);
7918
7919	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7920		u16 val16;
7921
7922		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7923			int i;
7924			u32 cfg_val;
7925
7926			/* Wait for link training to complete.  */
7927			for (i = 0; i < 5000; i++)
7928				udelay(100);
7929
7930			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7931			pci_write_config_dword(tp->pdev, 0xc4,
7932					       cfg_val | (1 << 15));
7933		}
7934
7935		/* Clear the "no snoop" and "relaxed ordering" bits. */
7936		pci_read_config_word(tp->pdev,
7937				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7938				     &val16);
7939		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7940			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7941		/*
7942		 * Older PCIe devices only support the 128 byte
7943		 * MPS setting.  Enforce the restriction.
7944		 */
7945		if (!tg3_flag(tp, CPMU_PRESENT))
7946			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7947		pci_write_config_word(tp->pdev,
7948				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7949				      val16);
7950
7951		/* Clear error status */
7952		pci_write_config_word(tp->pdev,
7953				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7954				      PCI_EXP_DEVSTA_CED |
7955				      PCI_EXP_DEVSTA_NFED |
7956				      PCI_EXP_DEVSTA_FED |
7957				      PCI_EXP_DEVSTA_URD);
7958	}
7959
7960	tg3_restore_pci_state(tp);
7961
7962	tg3_flag_clear(tp, CHIP_RESETTING);
7963	tg3_flag_clear(tp, ERROR_PROCESSED);
7964
7965	val = 0;
7966	if (tg3_flag(tp, 5780_CLASS))
7967		val = tr32(MEMARB_MODE);
7968	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7969
7970	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7971		tg3_stop_fw(tp);
7972		tw32(0x5000, 0x400);
7973	}
7974
7975	tw32(GRC_MODE, tp->grc_mode);
7976
7977	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7978		val = tr32(0xc4);
7979
7980		tw32(0xc4, val | (1 << 15));
7981	}
7982
7983	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7984	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7985		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7986		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7987			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7988		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7989	}
7990
7991	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7992		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7993		val = tp->mac_mode;
7994	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7995		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7996		val = tp->mac_mode;
7997	} else
7998		val = 0;
7999
8000	tw32_f(MAC_MODE, val);
8001	udelay(40);
8002
8003	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8004
8005	err = tg3_poll_fw(tp);
8006	if (err)
8007		return err;
8008
8009	tg3_mdio_start(tp);
8010
8011	if (tg3_flag(tp, PCI_EXPRESS) &&
8012	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8013	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8014	    !tg3_flag(tp, 57765_PLUS)) {
8015		val = tr32(0x7c00);
8016
8017		tw32(0x7c00, val | (1 << 25));
8018	}
8019
8020	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8021		val = tr32(TG3_CPMU_CLCK_ORIDE);
8022		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8023	}
8024
8025	/* Reprobe ASF enable state.  */
8026	tg3_flag_clear(tp, ENABLE_ASF);
8027	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8028	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8029	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8030		u32 nic_cfg;
8031
8032		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8033		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8034			tg3_flag_set(tp, ENABLE_ASF);
8035			tp->last_event_jiffies = jiffies;
8036			if (tg3_flag(tp, 5750_PLUS))
8037				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8038		}
8039	}
8040
8041	return 0;
8042}
8043
8044static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8045static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8046
8047/* tp->lock is held. */
8048static int tg3_halt(struct tg3 *tp, int kind, int silent)
8049{
8050	int err;
8051
8052	tg3_stop_fw(tp);
8053
8054	tg3_write_sig_pre_reset(tp, kind);
8055
8056	tg3_abort_hw(tp, silent);
8057	err = tg3_chip_reset(tp);
8058
8059	__tg3_set_mac_addr(tp, 0);
8060
8061	tg3_write_sig_legacy(tp, kind);
8062	tg3_write_sig_post_reset(tp, kind);
8063
8064	if (tp->hw_stats) {
8065		/* Save the stats across chip resets... */
8066		tg3_get_nstats(tp, &tp->net_stats_prev);
8067		tg3_get_estats(tp, &tp->estats_prev);
8068
8069		/* And make sure the next sample is new data */
8070		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8071	}
8072
8073	if (err)
8074		return err;
8075
8076	return 0;
8077}
8078
8079static int tg3_set_mac_addr(struct net_device *dev, void *p)
8080{
8081	struct tg3 *tp = netdev_priv(dev);
8082	struct sockaddr *addr = p;
8083	int err = 0, skip_mac_1 = 0;
8084
8085	if (!is_valid_ether_addr(addr->sa_data))
8086		return -EADDRNOTAVAIL;
8087
8088	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8089
8090	if (!netif_running(dev))
8091		return 0;
8092
8093	if (tg3_flag(tp, ENABLE_ASF)) {
8094		u32 addr0_high, addr0_low, addr1_high, addr1_low;
8095
8096		addr0_high = tr32(MAC_ADDR_0_HIGH);
8097		addr0_low = tr32(MAC_ADDR_0_LOW);
8098		addr1_high = tr32(MAC_ADDR_1_HIGH);
8099		addr1_low = tr32(MAC_ADDR_1_LOW);
8100
8101		/* Skip MAC addr 1 if ASF is using it. */
8102		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8103		    !(addr1_high == 0 && addr1_low == 0))
8104			skip_mac_1 = 1;
8105	}
8106	spin_lock_bh(&tp->lock);
8107	__tg3_set_mac_addr(tp, skip_mac_1);
8108	spin_unlock_bh(&tp->lock);
8109
8110	return err;
8111}
8112
8113/* tp->lock is held. */
8114static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8115			   dma_addr_t mapping, u32 maxlen_flags,
8116			   u32 nic_addr)
8117{
8118	tg3_write_mem(tp,
8119		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8120		      ((u64) mapping >> 32));
8121	tg3_write_mem(tp,
8122		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8123		      ((u64) mapping & 0xffffffff));
8124	tg3_write_mem(tp,
8125		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8126		       maxlen_flags);
8127
8128	if (!tg3_flag(tp, 5705_PLUS))
8129		tg3_write_mem(tp,
8130			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8131			      nic_addr);
8132}
8133
8134static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8135{
8136	int i;
8137
8138	if (!tg3_flag(tp, ENABLE_TSS)) {
8139		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8140		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8141		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8142	} else {
8143		tw32(HOSTCC_TXCOL_TICKS, 0);
8144		tw32(HOSTCC_TXMAX_FRAMES, 0);
8145		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8146	}
8147
8148	if (!tg3_flag(tp, ENABLE_RSS)) {
8149		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8150		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8151		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8152	} else {
8153		tw32(HOSTCC_RXCOL_TICKS, 0);
8154		tw32(HOSTCC_RXMAX_FRAMES, 0);
8155		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8156	}
8157
8158	if (!tg3_flag(tp, 5705_PLUS)) {
8159		u32 val = ec->stats_block_coalesce_usecs;
8160
8161		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8162		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8163
8164		if (!netif_carrier_ok(tp->dev))
8165			val = 0;
8166
8167		tw32(HOSTCC_STAT_COAL_TICKS, val);
8168	}
8169
8170	for (i = 0; i < tp->irq_cnt - 1; i++) {
8171		u32 reg;
8172
8173		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8174		tw32(reg, ec->rx_coalesce_usecs);
8175		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8176		tw32(reg, ec->rx_max_coalesced_frames);
8177		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8178		tw32(reg, ec->rx_max_coalesced_frames_irq);
8179
8180		if (tg3_flag(tp, ENABLE_TSS)) {
8181			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8182			tw32(reg, ec->tx_coalesce_usecs);
8183			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8184			tw32(reg, ec->tx_max_coalesced_frames);
8185			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8186			tw32(reg, ec->tx_max_coalesced_frames_irq);
8187		}
8188	}
8189
8190	for (; i < tp->irq_max - 1; i++) {
8191		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8192		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8193		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8194
8195		if (tg3_flag(tp, ENABLE_TSS)) {
8196			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8197			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8198			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8199		}
8200	}
8201}
8202
8203/* tp->lock is held. */
8204static void tg3_rings_reset(struct tg3 *tp)
8205{
8206	int i;
8207	u32 stblk, txrcb, rxrcb, limit;
8208	struct tg3_napi *tnapi = &tp->napi[0];
8209
8210	/* Disable all transmit rings but the first. */
8211	if (!tg3_flag(tp, 5705_PLUS))
8212		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8213	else if (tg3_flag(tp, 5717_PLUS))
8214		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8215	else if (tg3_flag(tp, 57765_CLASS))
8216		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8217	else
8218		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8219
8220	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8221	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8222		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8223			      BDINFO_FLAGS_DISABLED);
8224
8225
8226	/* Disable all receive return rings but the first. */
8227	if (tg3_flag(tp, 5717_PLUS))
8228		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8229	else if (!tg3_flag(tp, 5705_PLUS))
8230		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8231	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8232		 tg3_flag(tp, 57765_CLASS))
8233		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8234	else
8235		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8236
8237	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8238	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8239		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8240			      BDINFO_FLAGS_DISABLED);
8241
8242	/* Disable interrupts */
8243	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8244	tp->napi[0].chk_msi_cnt = 0;
8245	tp->napi[0].last_rx_cons = 0;
8246	tp->napi[0].last_tx_cons = 0;
8247
8248	/* Zero mailbox registers. */
8249	if (tg3_flag(tp, SUPPORT_MSIX)) {
8250		for (i = 1; i < tp->irq_max; i++) {
8251			tp->napi[i].tx_prod = 0;
8252			tp->napi[i].tx_cons = 0;
8253			if (tg3_flag(tp, ENABLE_TSS))
8254				tw32_mailbox(tp->napi[i].prodmbox, 0);
8255			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8256			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8257			tp->napi[i].chk_msi_cnt = 0;
8258			tp->napi[i].last_rx_cons = 0;
8259			tp->napi[i].last_tx_cons = 0;
8260		}
8261		if (!tg3_flag(tp, ENABLE_TSS))
8262			tw32_mailbox(tp->napi[0].prodmbox, 0);
8263	} else {
8264		tp->napi[0].tx_prod = 0;
8265		tp->napi[0].tx_cons = 0;
8266		tw32_mailbox(tp->napi[0].prodmbox, 0);
8267		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8268	}
8269
8270	/* Make sure the NIC-based send BD rings are disabled. */
8271	if (!tg3_flag(tp, 5705_PLUS)) {
8272		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8273		for (i = 0; i < 16; i++)
8274			tw32_tx_mbox(mbox + i * 8, 0);
8275	}
8276
8277	txrcb = NIC_SRAM_SEND_RCB;
8278	rxrcb = NIC_SRAM_RCV_RET_RCB;
8279
8280	/* Clear status block in ram. */
8281	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8282
8283	/* Set status block DMA address */
8284	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8285	     ((u64) tnapi->status_mapping >> 32));
8286	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8287	     ((u64) tnapi->status_mapping & 0xffffffff));
8288
8289	if (tnapi->tx_ring) {
8290		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8291			       (TG3_TX_RING_SIZE <<
8292				BDINFO_FLAGS_MAXLEN_SHIFT),
8293			       NIC_SRAM_TX_BUFFER_DESC);
8294		txrcb += TG3_BDINFO_SIZE;
8295	}
8296
8297	if (tnapi->rx_rcb) {
8298		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8299			       (tp->rx_ret_ring_mask + 1) <<
8300				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8301		rxrcb += TG3_BDINFO_SIZE;
8302	}
8303
8304	stblk = HOSTCC_STATBLCK_RING1;
8305
8306	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8307		u64 mapping = (u64)tnapi->status_mapping;
8308		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8309		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8310
8311		/* Clear status block in ram. */
8312		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8313
8314		if (tnapi->tx_ring) {
8315			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8316				       (TG3_TX_RING_SIZE <<
8317					BDINFO_FLAGS_MAXLEN_SHIFT),
8318				       NIC_SRAM_TX_BUFFER_DESC);
8319			txrcb += TG3_BDINFO_SIZE;
8320		}
8321
8322		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8323			       ((tp->rx_ret_ring_mask + 1) <<
8324				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8325
8326		stblk += 8;
8327		rxrcb += TG3_BDINFO_SIZE;
8328	}
8329}
8330
8331static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8332{
8333	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8334
8335	if (!tg3_flag(tp, 5750_PLUS) ||
8336	    tg3_flag(tp, 5780_CLASS) ||
8337	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8338	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8339	    tg3_flag(tp, 57765_PLUS))
8340		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8341	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8342		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8343		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8344	else
8345		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8346
8347	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8348	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8349
8350	val = min(nic_rep_thresh, host_rep_thresh);
8351	tw32(RCVBDI_STD_THRESH, val);
8352
8353	if (tg3_flag(tp, 57765_PLUS))
8354		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8355
8356	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8357		return;
8358
8359	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8360
8361	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8362
8363	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8364	tw32(RCVBDI_JUMBO_THRESH, val);
8365
8366	if (tg3_flag(tp, 57765_PLUS))
8367		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8368}
8369
8370static inline u32 calc_crc(unsigned char *buf, int len)
8371{
8372	u32 reg;
8373	u32 tmp;
8374	int j, k;
8375
8376	reg = 0xffffffff;
8377
8378	for (j = 0; j < len; j++) {
8379		reg ^= buf[j];
8380
8381		for (k = 0; k < 8; k++) {
8382			tmp = reg & 0x01;
8383
8384			reg >>= 1;
8385
8386			if (tmp)
8387				reg ^= 0xedb88320;
8388		}
8389	}
8390
8391	return ~reg;
8392}
8393
8394static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8395{
8396	/* accept or reject all multicast frames */
8397	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8398	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8399	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8400	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8401}
8402
8403static void __tg3_set_rx_mode(struct net_device *dev)
8404{
8405	struct tg3 *tp = netdev_priv(dev);
8406	u32 rx_mode;
8407
8408	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8409				  RX_MODE_KEEP_VLAN_TAG);
8410
8411#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8412	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8413	 * flag clear.
8414	 */
8415	if (!tg3_flag(tp, ENABLE_ASF))
8416		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8417#endif
8418
8419	if (dev->flags & IFF_PROMISC) {
8420		/* Promiscuous mode. */
8421		rx_mode |= RX_MODE_PROMISC;
8422	} else if (dev->flags & IFF_ALLMULTI) {
8423		/* Accept all multicast. */
8424		tg3_set_multi(tp, 1);
8425	} else if (netdev_mc_empty(dev)) {
8426		/* Reject all multicast. */
8427		tg3_set_multi(tp, 0);
8428	} else {
8429		/* Accept one or more multicast(s). */
8430		struct netdev_hw_addr *ha;
8431		u32 mc_filter[4] = { 0, };
8432		u32 regidx;
8433		u32 bit;
8434		u32 crc;
8435
8436		netdev_for_each_mc_addr(ha, dev) {
8437			crc = calc_crc(ha->addr, ETH_ALEN);
8438			bit = ~crc & 0x7f;
8439			regidx = (bit & 0x60) >> 5;
8440			bit &= 0x1f;
8441			mc_filter[regidx] |= (1 << bit);
8442		}
8443
8444		tw32(MAC_HASH_REG_0, mc_filter[0]);
8445		tw32(MAC_HASH_REG_1, mc_filter[1]);
8446		tw32(MAC_HASH_REG_2, mc_filter[2]);
8447		tw32(MAC_HASH_REG_3, mc_filter[3]);
8448	}
8449
8450	if (rx_mode != tp->rx_mode) {
8451		tp->rx_mode = rx_mode;
8452		tw32_f(MAC_RX_MODE, rx_mode);
8453		udelay(10);
8454	}
8455}
8456
8457static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8458{
8459	int i;
8460
8461	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8462		tp->rss_ind_tbl[i] =
8463			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8464}
8465
8466static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8467{
8468	int i;
8469
8470	if (!tg3_flag(tp, SUPPORT_MSIX))
8471		return;
8472
8473	if (tp->irq_cnt <= 2) {
8474		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8475		return;
8476	}
8477
8478	/* Validate table against current IRQ count */
8479	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8480		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8481			break;
8482	}
8483
8484	if (i != TG3_RSS_INDIR_TBL_SIZE)
8485		tg3_rss_init_dflt_indir_tbl(tp);
8486}
8487
8488static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8489{
8490	int i = 0;
8491	u32 reg = MAC_RSS_INDIR_TBL_0;
8492
8493	while (i < TG3_RSS_INDIR_TBL_SIZE) {
8494		u32 val = tp->rss_ind_tbl[i];
8495		i++;
8496		for (; i % 8; i++) {
8497			val <<= 4;
8498			val |= tp->rss_ind_tbl[i];
8499		}
8500		tw32(reg, val);
8501		reg += 4;
8502	}
8503}
8504
8505/* tp->lock is held. */
8506static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8507{
8508	u32 val, rdmac_mode;
8509	int i, err, limit;
8510	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8511
8512	tg3_disable_ints(tp);
8513
8514	tg3_stop_fw(tp);
8515
8516	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8517
8518	if (tg3_flag(tp, INIT_COMPLETE))
8519		tg3_abort_hw(tp, 1);
8520
8521	/* Enable MAC control of LPI */
8522	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8523		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8524		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8525		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
8526
8527		tw32_f(TG3_CPMU_EEE_CTRL,
8528		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8529
8530		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8531		      TG3_CPMU_EEEMD_LPI_IN_TX |
8532		      TG3_CPMU_EEEMD_LPI_IN_RX |
8533		      TG3_CPMU_EEEMD_EEE_ENABLE;
8534
8535		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8536			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8537
8538		if (tg3_flag(tp, ENABLE_APE))
8539			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8540
8541		tw32_f(TG3_CPMU_EEE_MODE, val);
8542
8543		tw32_f(TG3_CPMU_EEE_DBTMR1,
8544		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8545		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8546
8547		tw32_f(TG3_CPMU_EEE_DBTMR2,
8548		       TG3_CPMU_DBTMR2_APE_TX_2047US |
8549		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8550	}
8551
8552	if (reset_phy)
8553		tg3_phy_reset(tp);
8554
8555	err = tg3_chip_reset(tp);
8556	if (err)
8557		return err;
8558
8559	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8560
8561	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8562		val = tr32(TG3_CPMU_CTRL);
8563		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8564		tw32(TG3_CPMU_CTRL, val);
8565
8566		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8567		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8568		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8569		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8570
8571		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8572		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8573		val |= CPMU_LNK_AWARE_MACCLK_6_25;
8574		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8575
8576		val = tr32(TG3_CPMU_HST_ACC);
8577		val &= ~CPMU_HST_ACC_MACCLK_MASK;
8578		val |= CPMU_HST_ACC_MACCLK_6_25;
8579		tw32(TG3_CPMU_HST_ACC, val);
8580	}
8581
8582	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8583		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8584		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8585		       PCIE_PWR_MGMT_L1_THRESH_4MS;
8586		tw32(PCIE_PWR_MGMT_THRESH, val);
8587
8588		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8589		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8590
8591		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8592
8593		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8594		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8595	}
8596
8597	if (tg3_flag(tp, L1PLLPD_EN)) {
8598		u32 grc_mode = tr32(GRC_MODE);
8599
8600		/* Access the lower 1K of PL PCIE block registers. */
8601		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8602		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8603
8604		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8605		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8606		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8607
8608		tw32(GRC_MODE, grc_mode);
8609	}
8610
8611	if (tg3_flag(tp, 57765_CLASS)) {
8612		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8613			u32 grc_mode = tr32(GRC_MODE);
8614
8615			/* Access the lower 1K of PL PCIE block registers. */
8616			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8617			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8618
8619			val = tr32(TG3_PCIE_TLDLPL_PORT +
8620				   TG3_PCIE_PL_LO_PHYCTL5);
8621			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8622			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8623
8624			tw32(GRC_MODE, grc_mode);
8625		}
8626
8627		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8628			u32 grc_mode = tr32(GRC_MODE);
8629
8630			/* Access the lower 1K of DL PCIE block registers. */
8631			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8632			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8633
8634			val = tr32(TG3_PCIE_TLDLPL_PORT +
8635				   TG3_PCIE_DL_LO_FTSMAX);
8636			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8637			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8638			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8639
8640			tw32(GRC_MODE, grc_mode);
8641		}
8642
8643		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8644		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8645		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8646		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8647	}
8648
8649	/* This works around an issue with Athlon chipsets on
8650	 * B3 tigon3 silicon.  This bit has no effect on any
8651	 * other revision.  But do not set this on PCI Express
8652	 * chips and don't even touch the clocks if the CPMU is present.
8653	 */
8654	if (!tg3_flag(tp, CPMU_PRESENT)) {
8655		if (!tg3_flag(tp, PCI_EXPRESS))
8656			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8657		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8658	}
8659
8660	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8661	    tg3_flag(tp, PCIX_MODE)) {
8662		val = tr32(TG3PCI_PCISTATE);
8663		val |= PCISTATE_RETRY_SAME_DMA;
8664		tw32(TG3PCI_PCISTATE, val);
8665	}
8666
8667	if (tg3_flag(tp, ENABLE_APE)) {
8668		/* Allow reads and writes to the
8669		 * APE register and memory space.
8670		 */
8671		val = tr32(TG3PCI_PCISTATE);
8672		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8673		       PCISTATE_ALLOW_APE_SHMEM_WR |
8674		       PCISTATE_ALLOW_APE_PSPACE_WR;
8675		tw32(TG3PCI_PCISTATE, val);
8676	}
8677
8678	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8679		/* Enable some hw fixes.  */
8680		val = tr32(TG3PCI_MSI_DATA);
8681		val |= (1 << 26) | (1 << 28) | (1 << 29);
8682		tw32(TG3PCI_MSI_DATA, val);
8683	}
8684
8685	/* Descriptor ring init may make accesses to the
8686	 * NIC SRAM area to setup the TX descriptors, so we
8687	 * can only do this after the hardware has been
8688	 * successfully reset.
8689	 */
8690	err = tg3_init_rings(tp);
8691	if (err)
8692		return err;
8693
8694	if (tg3_flag(tp, 57765_PLUS)) {
8695		val = tr32(TG3PCI_DMA_RW_CTRL) &
8696		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8697		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8698			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8699		if (!tg3_flag(tp, 57765_CLASS) &&
8700		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8701			val |= DMA_RWCTRL_TAGGED_STAT_WA;
8702		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8703	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8704		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8705		/* This value is determined during the probe time DMA
8706		 * engine test, tg3_test_dma.
8707		 */
8708		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8709	}
8710
8711	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8712			  GRC_MODE_4X_NIC_SEND_RINGS |
8713			  GRC_MODE_NO_TX_PHDR_CSUM |
8714			  GRC_MODE_NO_RX_PHDR_CSUM);
8715	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8716
8717	/* Pseudo-header checksum is done by hardware logic and not
8718	 * the offload processers, so make the chip do the pseudo-
8719	 * header checksums on receive.  For transmit it is more
8720	 * convenient to do the pseudo-header checksum in software
8721	 * as Linux does that on transmit for us in all cases.
8722	 */
8723	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8724
8725	tw32(GRC_MODE,
8726	     tp->grc_mode |
8727	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8728
8729	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
8730	val = tr32(GRC_MISC_CFG);
8731	val &= ~0xff;
8732	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8733	tw32(GRC_MISC_CFG, val);
8734
8735	/* Initialize MBUF/DESC pool. */
8736	if (tg3_flag(tp, 5750_PLUS)) {
8737		/* Do nothing.  */
8738	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8739		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8740		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8741			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8742		else
8743			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8744		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8745		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8746	} else if (tg3_flag(tp, TSO_CAPABLE)) {
8747		int fw_len;
8748
8749		fw_len = tp->fw_len;
8750		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8751		tw32(BUFMGR_MB_POOL_ADDR,
8752		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8753		tw32(BUFMGR_MB_POOL_SIZE,
8754		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8755	}
8756
8757	if (tp->dev->mtu <= ETH_DATA_LEN) {
8758		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8759		     tp->bufmgr_config.mbuf_read_dma_low_water);
8760		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8761		     tp->bufmgr_config.mbuf_mac_rx_low_water);
8762		tw32(BUFMGR_MB_HIGH_WATER,
8763		     tp->bufmgr_config.mbuf_high_water);
8764	} else {
8765		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8766		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8767		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8768		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8769		tw32(BUFMGR_MB_HIGH_WATER,
8770		     tp->bufmgr_config.mbuf_high_water_jumbo);
8771	}
8772	tw32(BUFMGR_DMA_LOW_WATER,
8773	     tp->bufmgr_config.dma_low_water);
8774	tw32(BUFMGR_DMA_HIGH_WATER,
8775	     tp->bufmgr_config.dma_high_water);
8776
8777	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8778	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8779		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8780	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8781	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8782	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8783		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8784	tw32(BUFMGR_MODE, val);
8785	for (i = 0; i < 2000; i++) {
8786		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8787			break;
8788		udelay(10);
8789	}
8790	if (i >= 2000) {
8791		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8792		return -ENODEV;
8793	}
8794
8795	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8796		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8797
8798	tg3_setup_rxbd_thresholds(tp);
8799
8800	/* Initialize TG3_BDINFO's at:
8801	 *  RCVDBDI_STD_BD:	standard eth size rx ring
8802	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
8803	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
8804	 *
8805	 * like so:
8806	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
8807	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
8808	 *                              ring attribute flags
8809	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
8810	 *
8811	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8812	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8813	 *
8814	 * The size of each ring is fixed in the firmware, but the location is
8815	 * configurable.
8816	 */
8817	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8818	     ((u64) tpr->rx_std_mapping >> 32));
8819	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8820	     ((u64) tpr->rx_std_mapping & 0xffffffff));
8821	if (!tg3_flag(tp, 5717_PLUS))
8822		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8823		     NIC_SRAM_RX_BUFFER_DESC);
8824
8825	/* Disable the mini ring */
8826	if (!tg3_flag(tp, 5705_PLUS))
8827		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8828		     BDINFO_FLAGS_DISABLED);
8829
8830	/* Program the jumbo buffer descriptor ring control
8831	 * blocks on those devices that have them.
8832	 */
8833	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8834	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8835
8836		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8837			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8838			     ((u64) tpr->rx_jmb_mapping >> 32));
8839			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8840			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8841			val = TG3_RX_JMB_RING_SIZE(tp) <<
8842			      BDINFO_FLAGS_MAXLEN_SHIFT;
8843			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8844			     val | BDINFO_FLAGS_USE_EXT_RECV);
8845			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8846			    tg3_flag(tp, 57765_CLASS))
8847				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8848				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8849		} else {
8850			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8851			     BDINFO_FLAGS_DISABLED);
8852		}
8853
8854		if (tg3_flag(tp, 57765_PLUS)) {
8855			val = TG3_RX_STD_RING_SIZE(tp);
8856			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8857			val |= (TG3_RX_STD_DMA_SZ << 2);
8858		} else
8859			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8860	} else
8861		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8862
8863	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8864
8865	tpr->rx_std_prod_idx = tp->rx_pending;
8866	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8867
8868	tpr->rx_jmb_prod_idx =
8869		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8870	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8871
8872	tg3_rings_reset(tp);
8873
8874	/* Initialize MAC address and backoff seed. */
8875	__tg3_set_mac_addr(tp, 0);
8876
8877	/* MTU + ethernet header + FCS + optional VLAN tag */
8878	tw32(MAC_RX_MTU_SIZE,
8879	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8880
8881	/* The slot time is changed by tg3_setup_phy if we
8882	 * run at gigabit with half duplex.
8883	 */
8884	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8885	      (6 << TX_LENGTHS_IPG_SHIFT) |
8886	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8887
8888	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8889		val |= tr32(MAC_TX_LENGTHS) &
8890		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
8891			TX_LENGTHS_CNT_DWN_VAL_MSK);
8892
8893	tw32(MAC_TX_LENGTHS, val);
8894
8895	/* Receive rules. */
8896	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8897	tw32(RCVLPC_CONFIG, 0x0181);
8898
8899	/* Calculate RDMAC_MODE setting early, we need it to determine
8900	 * the RCVLPC_STATE_ENABLE mask.
8901	 */
8902	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8903		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8904		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8905		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8906		      RDMAC_MODE_LNGREAD_ENAB);
8907
8908	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8909		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8910
8911	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8912	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8913	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8914		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8915			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8916			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8917
8918	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8919	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8920		if (tg3_flag(tp, TSO_CAPABLE) &&
8921		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8922			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8923		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8924			   !tg3_flag(tp, IS_5788)) {
8925			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8926		}
8927	}
8928
8929	if (tg3_flag(tp, PCI_EXPRESS))
8930		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8931
8932	if (tg3_flag(tp, HW_TSO_1) ||
8933	    tg3_flag(tp, HW_TSO_2) ||
8934	    tg3_flag(tp, HW_TSO_3))
8935		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8936
8937	if (tg3_flag(tp, 57765_PLUS) ||
8938	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8939	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8940		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8941
8942	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8943		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8944
8945	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8946	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8947	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8948	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8949	    tg3_flag(tp, 57765_PLUS)) {
8950		val = tr32(TG3_RDMA_RSRVCTRL_REG);
8951		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8952		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8953			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8954				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8955				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8956			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8957			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8958			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8959		}
8960		tw32(TG3_RDMA_RSRVCTRL_REG,
8961		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8962	}
8963
8964	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8965	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8966		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8967		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8968		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8969		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8970	}
8971
8972	/* Receive/send statistics. */
8973	if (tg3_flag(tp, 5750_PLUS)) {
8974		val = tr32(RCVLPC_STATS_ENABLE);
8975		val &= ~RCVLPC_STATSENAB_DACK_FIX;
8976		tw32(RCVLPC_STATS_ENABLE, val);
8977	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8978		   tg3_flag(tp, TSO_CAPABLE)) {
8979		val = tr32(RCVLPC_STATS_ENABLE);
8980		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8981		tw32(RCVLPC_STATS_ENABLE, val);
8982	} else {
8983		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8984	}
8985	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8986	tw32(SNDDATAI_STATSENAB, 0xffffff);
8987	tw32(SNDDATAI_STATSCTRL,
8988	     (SNDDATAI_SCTRL_ENABLE |
8989	      SNDDATAI_SCTRL_FASTUPD));
8990
8991	/* Setup host coalescing engine. */
8992	tw32(HOSTCC_MODE, 0);
8993	for (i = 0; i < 2000; i++) {
8994		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8995			break;
8996		udelay(10);
8997	}
8998
8999	__tg3_set_coalesce(tp, &tp->coal);
9000
9001	if (!tg3_flag(tp, 5705_PLUS)) {
9002		/* Status/statistics block address.  See tg3_timer,
9003		 * the tg3_periodic_fetch_stats call there, and
9004		 * tg3_get_stats to see how this works for 5705/5750 chips.
9005		 */
9006		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9007		     ((u64) tp->stats_mapping >> 32));
9008		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9009		     ((u64) tp->stats_mapping & 0xffffffff));
9010		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9011
9012		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9013
9014		/* Clear statistics and status block memory areas */
9015		for (i = NIC_SRAM_STATS_BLK;
9016		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9017		     i += sizeof(u32)) {
9018			tg3_write_mem(tp, i, 0);
9019			udelay(40);
9020		}
9021	}
9022
9023	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9024
9025	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9026	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9027	if (!tg3_flag(tp, 5705_PLUS))
9028		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9029
9030	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9031		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9032		/* reset to prevent losing 1st rx packet intermittently */
9033		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9034		udelay(10);
9035	}
9036
9037	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9038			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9039			MAC_MODE_FHDE_ENABLE;
9040	if (tg3_flag(tp, ENABLE_APE))
9041		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9042	if (!tg3_flag(tp, 5705_PLUS) &&
9043	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9044	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9045		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9046	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9047	udelay(40);
9048
9049	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9050	 * If TG3_FLAG_IS_NIC is zero, we should read the
9051	 * register to preserve the GPIO settings for LOMs. The GPIOs,
9052	 * whether used as inputs or outputs, are set by boot code after
9053	 * reset.
9054	 */
9055	if (!tg3_flag(tp, IS_NIC)) {
9056		u32 gpio_mask;
9057
9058		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9059			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9060			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9061
9062		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9063			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9064				     GRC_LCLCTRL_GPIO_OUTPUT3;
9065
9066		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9067			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9068
9069		tp->grc_local_ctrl &= ~gpio_mask;
9070		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9071
9072		/* GPIO1 must be driven high for eeprom write protect */
9073		if (tg3_flag(tp, EEPROM_WRITE_PROT))
9074			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9075					       GRC_LCLCTRL_GPIO_OUTPUT1);
9076	}
9077	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9078	udelay(100);
9079
9080	if (tg3_flag(tp, USING_MSIX)) {
9081		val = tr32(MSGINT_MODE);
9082		val |= MSGINT_MODE_ENABLE;
9083		if (tp->irq_cnt > 1)
9084			val |= MSGINT_MODE_MULTIVEC_EN;
9085		if (!tg3_flag(tp, 1SHOT_MSI))
9086			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9087		tw32(MSGINT_MODE, val);
9088	}
9089
9090	if (!tg3_flag(tp, 5705_PLUS)) {
9091		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9092		udelay(40);
9093	}
9094
9095	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9096	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9097	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9098	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9099	       WDMAC_MODE_LNGREAD_ENAB);
9100
9101	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9102	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9103		if (tg3_flag(tp, TSO_CAPABLE) &&
9104		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9105		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9106			/* nothing */
9107		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9108			   !tg3_flag(tp, IS_5788)) {
9109			val |= WDMAC_MODE_RX_ACCEL;
9110		}
9111	}
9112
9113	/* Enable host coalescing bug fix */
9114	if (tg3_flag(tp, 5755_PLUS))
9115		val |= WDMAC_MODE_STATUS_TAG_FIX;
9116
9117	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9118		val |= WDMAC_MODE_BURST_ALL_DATA;
9119
9120	tw32_f(WDMAC_MODE, val);
9121	udelay(40);
9122
9123	if (tg3_flag(tp, PCIX_MODE)) {
9124		u16 pcix_cmd;
9125
9126		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9127				     &pcix_cmd);
9128		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9129			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9130			pcix_cmd |= PCI_X_CMD_READ_2K;
9131		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9132			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9133			pcix_cmd |= PCI_X_CMD_READ_2K;
9134		}
9135		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9136				      pcix_cmd);
9137	}
9138
9139	tw32_f(RDMAC_MODE, rdmac_mode);
9140	udelay(40);
9141
9142	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9143	if (!tg3_flag(tp, 5705_PLUS))
9144		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9145
9146	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9147		tw32(SNDDATAC_MODE,
9148		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9149	else
9150		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9151
9152	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9153	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9154	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9155	if (tg3_flag(tp, LRG_PROD_RING_CAP))
9156		val |= RCVDBDI_MODE_LRG_RING_SZ;
9157	tw32(RCVDBDI_MODE, val);
9158	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9159	if (tg3_flag(tp, HW_TSO_1) ||
9160	    tg3_flag(tp, HW_TSO_2) ||
9161	    tg3_flag(tp, HW_TSO_3))
9162		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9163	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9164	if (tg3_flag(tp, ENABLE_TSS))
9165		val |= SNDBDI_MODE_MULTI_TXQ_EN;
9166	tw32(SNDBDI_MODE, val);
9167	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9168
9169	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9170		err = tg3_load_5701_a0_firmware_fix(tp);
9171		if (err)
9172			return err;
9173	}
9174
9175	if (tg3_flag(tp, TSO_CAPABLE)) {
9176		err = tg3_load_tso_firmware(tp);
9177		if (err)
9178			return err;
9179	}
9180
9181	tp->tx_mode = TX_MODE_ENABLE;
9182
9183	if (tg3_flag(tp, 5755_PLUS) ||
9184	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9185		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9186
9187	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9188		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9189		tp->tx_mode &= ~val;
9190		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9191	}
9192
9193	tw32_f(MAC_TX_MODE, tp->tx_mode);
9194	udelay(100);
9195
9196	if (tg3_flag(tp, ENABLE_RSS)) {
9197		tg3_rss_write_indir_tbl(tp);
9198
9199		/* Setup the "secret" hash key. */
9200		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9201		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9202		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9203		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9204		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9205		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9206		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9207		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9208		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9209		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9210	}
9211
9212	tp->rx_mode = RX_MODE_ENABLE;
9213	if (tg3_flag(tp, 5755_PLUS))
9214		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9215
9216	if (tg3_flag(tp, ENABLE_RSS))
9217		tp->rx_mode |= RX_MODE_RSS_ENABLE |
9218			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
9219			       RX_MODE_RSS_IPV6_HASH_EN |
9220			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
9221			       RX_MODE_RSS_IPV4_HASH_EN |
9222			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
9223
9224	tw32_f(MAC_RX_MODE, tp->rx_mode);
9225	udelay(10);
9226
9227	tw32(MAC_LED_CTRL, tp->led_ctrl);
9228
9229	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9230	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9231		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9232		udelay(10);
9233	}
9234	tw32_f(MAC_RX_MODE, tp->rx_mode);
9235	udelay(10);
9236
9237	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9238		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9239			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9240			/* Set drive transmission level to 1.2V  */
9241			/* only if the signal pre-emphasis bit is not set  */
9242			val = tr32(MAC_SERDES_CFG);
9243			val &= 0xfffff000;
9244			val |= 0x880;
9245			tw32(MAC_SERDES_CFG, val);
9246		}
9247		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9248			tw32(MAC_SERDES_CFG, 0x616000);
9249	}
9250
9251	/* Prevent chip from dropping frames when flow control
9252	 * is enabled.
9253	 */
9254	if (tg3_flag(tp, 57765_CLASS))
9255		val = 1;
9256	else
9257		val = 2;
9258	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9259
9260	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9261	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9262		/* Use hardware link auto-negotiation */
9263		tg3_flag_set(tp, HW_AUTONEG);
9264	}
9265
9266	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9267	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9268		u32 tmp;
9269
9270		tmp = tr32(SERDES_RX_CTRL);
9271		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9272		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9273		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9274		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9275	}
9276
9277	if (!tg3_flag(tp, USE_PHYLIB)) {
9278		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9279			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9280
9281		err = tg3_setup_phy(tp, 0);
9282		if (err)
9283			return err;
9284
9285		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9286		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9287			u32 tmp;
9288
9289			/* Clear CRC stats. */
9290			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9291				tg3_writephy(tp, MII_TG3_TEST1,
9292					     tmp | MII_TG3_TEST1_CRC_EN);
9293				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9294			}
9295		}
9296	}
9297
9298	__tg3_set_rx_mode(tp->dev);
9299
9300	/* Initialize receive rules. */
9301	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9302	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9303	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9304	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9305
9306	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9307		limit = 8;
9308	else
9309		limit = 16;
9310	if (tg3_flag(tp, ENABLE_ASF))
9311		limit -= 4;
9312	switch (limit) {
9313	case 16:
9314		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9315	case 15:
9316		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9317	case 14:
9318		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9319	case 13:
9320		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9321	case 12:
9322		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9323	case 11:
9324		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9325	case 10:
9326		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9327	case 9:
9328		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9329	case 8:
9330		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9331	case 7:
9332		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9333	case 6:
9334		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9335	case 5:
9336		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9337	case 4:
9338		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9339	case 3:
9340		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9341	case 2:
9342	case 1:
9343
9344	default:
9345		break;
9346	}
9347
9348	if (tg3_flag(tp, ENABLE_APE))
9349		/* Write our heartbeat update interval to APE. */
9350		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9351				APE_HOST_HEARTBEAT_INT_DISABLE);
9352
9353	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9354
9355	return 0;
9356}
9357
9358/* Called at device open time to get the chip ready for
9359 * packet processing.  Invoked with tp->lock held.
9360 */
9361static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9362{
9363	tg3_switch_clocks(tp);
9364
9365	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9366
9367	return tg3_reset_hw(tp, reset_phy);
9368}
9369
9370#define TG3_STAT_ADD32(PSTAT, REG) \
9371do {	u32 __val = tr32(REG); \
9372	(PSTAT)->low += __val; \
9373	if ((PSTAT)->low < __val) \
9374		(PSTAT)->high += 1; \
9375} while (0)
9376
9377static void tg3_periodic_fetch_stats(struct tg3 *tp)
9378{
9379	struct tg3_hw_stats *sp = tp->hw_stats;
9380
9381	if (!netif_carrier_ok(tp->dev))
9382		return;
9383
9384	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9385	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9386	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9387	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9388	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9389	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9390	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9391	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9392	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9393	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9394	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9395	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9396	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9397
9398	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9399	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9400	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9401	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9402	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9403	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9404	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9405	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9406	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9407	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9408	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9409	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9410	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9411	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9412
9413	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9414	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9415	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9416	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9417		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9418	} else {
9419		u32 val = tr32(HOSTCC_FLOW_ATTN);
9420		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9421		if (val) {
9422			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9423			sp->rx_discards.low += val;
9424			if (sp->rx_discards.low < val)
9425				sp->rx_discards.high += 1;
9426		}
9427		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9428	}
9429	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9430}
9431
9432static void tg3_chk_missed_msi(struct tg3 *tp)
9433{
9434	u32 i;
9435
9436	for (i = 0; i < tp->irq_cnt; i++) {
9437		struct tg3_napi *tnapi = &tp->napi[i];
9438
9439		if (tg3_has_work(tnapi)) {
9440			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9441			    tnapi->last_tx_cons == tnapi->tx_cons) {
9442				if (tnapi->chk_msi_cnt < 1) {
9443					tnapi->chk_msi_cnt++;
9444					return;
9445				}
9446				tg3_msi(0, tnapi);
9447			}
9448		}
9449		tnapi->chk_msi_cnt = 0;
9450		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9451		tnapi->last_tx_cons = tnapi->tx_cons;
9452	}
9453}
9454
9455static void tg3_timer(unsigned long __opaque)
9456{
9457	struct tg3 *tp = (struct tg3 *) __opaque;
9458
9459	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9460		goto restart_timer;
9461
9462	spin_lock(&tp->lock);
9463
9464	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9465	    tg3_flag(tp, 57765_CLASS))
9466		tg3_chk_missed_msi(tp);
9467
9468	if (!tg3_flag(tp, TAGGED_STATUS)) {
9469		/* All of this garbage is because when using non-tagged
9470		 * IRQ status the mailbox/status_block protocol the chip
9471		 * uses with the cpu is race prone.
9472		 */
9473		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9474			tw32(GRC_LOCAL_CTRL,
9475			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9476		} else {
9477			tw32(HOSTCC_MODE, tp->coalesce_mode |
9478			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9479		}
9480
9481		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9482			spin_unlock(&tp->lock);
9483			tg3_reset_task_schedule(tp);
9484			goto restart_timer;
9485		}
9486	}
9487
9488	/* This part only runs once per second. */
9489	if (!--tp->timer_counter) {
9490		if (tg3_flag(tp, 5705_PLUS))
9491			tg3_periodic_fetch_stats(tp);
9492
9493		if (tp->setlpicnt && !--tp->setlpicnt)
9494			tg3_phy_eee_enable(tp);
9495
9496		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9497			u32 mac_stat;
9498			int phy_event;
9499
9500			mac_stat = tr32(MAC_STATUS);
9501
9502			phy_event = 0;
9503			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9504				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9505					phy_event = 1;
9506			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9507				phy_event = 1;
9508
9509			if (phy_event)
9510				tg3_setup_phy(tp, 0);
9511		} else if (tg3_flag(tp, POLL_SERDES)) {
9512			u32 mac_stat = tr32(MAC_STATUS);
9513			int need_setup = 0;
9514
9515			if (netif_carrier_ok(tp->dev) &&
9516			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9517				need_setup = 1;
9518			}
9519			if (!netif_carrier_ok(tp->dev) &&
9520			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9521					 MAC_STATUS_SIGNAL_DET))) {
9522				need_setup = 1;
9523			}
9524			if (need_setup) {
9525				if (!tp->serdes_counter) {
9526					tw32_f(MAC_MODE,
9527					     (tp->mac_mode &
9528					      ~MAC_MODE_PORT_MODE_MASK));
9529					udelay(40);
9530					tw32_f(MAC_MODE, tp->mac_mode);
9531					udelay(40);
9532				}
9533				tg3_setup_phy(tp, 0);
9534			}
9535		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9536			   tg3_flag(tp, 5780_CLASS)) {
9537			tg3_serdes_parallel_detect(tp);
9538		}
9539
9540		tp->timer_counter = tp->timer_multiplier;
9541	}
9542
9543	/* Heartbeat is only sent once every 2 seconds.
9544	 *
9545	 * The heartbeat is to tell the ASF firmware that the host
9546	 * driver is still alive.  In the event that the OS crashes,
9547	 * ASF needs to reset the hardware to free up the FIFO space
9548	 * that may be filled with rx packets destined for the host.
9549	 * If the FIFO is full, ASF will no longer function properly.
9550	 *
9551	 * Unintended resets have been reported on real time kernels
9552	 * where the timer doesn't run on time.  Netpoll will also have
9553	 * same problem.
9554	 *
9555	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9556	 * to check the ring condition when the heartbeat is expiring
9557	 * before doing the reset.  This will prevent most unintended
9558	 * resets.
9559	 */
9560	if (!--tp->asf_counter) {
9561		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9562			tg3_wait_for_event_ack(tp);
9563
9564			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9565				      FWCMD_NICDRV_ALIVE3);
9566			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9567			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9568				      TG3_FW_UPDATE_TIMEOUT_SEC);
9569
9570			tg3_generate_fw_event(tp);
9571		}
9572		tp->asf_counter = tp->asf_multiplier;
9573	}
9574
9575	spin_unlock(&tp->lock);
9576
9577restart_timer:
9578	tp->timer.expires = jiffies + tp->timer_offset;
9579	add_timer(&tp->timer);
9580}
9581
9582static void __devinit tg3_timer_init(struct tg3 *tp)
9583{
9584	if (tg3_flag(tp, TAGGED_STATUS) &&
9585	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9586	    !tg3_flag(tp, 57765_CLASS))
9587		tp->timer_offset = HZ;
9588	else
9589		tp->timer_offset = HZ / 10;
9590
9591	BUG_ON(tp->timer_offset > HZ);
9592
9593	tp->timer_multiplier = (HZ / tp->timer_offset);
9594	tp->asf_multiplier = (HZ / tp->timer_offset) *
9595			     TG3_FW_UPDATE_FREQ_SEC;
9596
9597	init_timer(&tp->timer);
9598	tp->timer.data = (unsigned long) tp;
9599	tp->timer.function = tg3_timer;
9600}
9601
9602static void tg3_timer_start(struct tg3 *tp)
9603{
9604	tp->asf_counter   = tp->asf_multiplier;
9605	tp->timer_counter = tp->timer_multiplier;
9606
9607	tp->timer.expires = jiffies + tp->timer_offset;
9608	add_timer(&tp->timer);
9609}
9610
9611static void tg3_timer_stop(struct tg3 *tp)
9612{
9613	del_timer_sync(&tp->timer);
9614}
9615
9616/* Restart hardware after configuration changes, self-test, etc.
9617 * Invoked with tp->lock held.
9618 */
9619static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9620	__releases(tp->lock)
9621	__acquires(tp->lock)
9622{
9623	int err;
9624
9625	err = tg3_init_hw(tp, reset_phy);
9626	if (err) {
9627		netdev_err(tp->dev,
9628			   "Failed to re-initialize device, aborting\n");
9629		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9630		tg3_full_unlock(tp);
9631		tg3_timer_stop(tp);
9632		tp->irq_sync = 0;
9633		tg3_napi_enable(tp);
9634		dev_close(tp->dev);
9635		tg3_full_lock(tp, 0);
9636	}
9637	return err;
9638}
9639
9640static void tg3_reset_task(struct work_struct *work)
9641{
9642	struct tg3 *tp = container_of(work, struct tg3, reset_task);
9643	int err;
9644
9645	tg3_full_lock(tp, 0);
9646
9647	if (!netif_running(tp->dev)) {
9648		tg3_flag_clear(tp, RESET_TASK_PENDING);
9649		tg3_full_unlock(tp);
9650		return;
9651	}
9652
9653	tg3_full_unlock(tp);
9654
9655	tg3_phy_stop(tp);
9656
9657	tg3_netif_stop(tp);
9658
9659	tg3_full_lock(tp, 1);
9660
9661	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9662		tp->write32_tx_mbox = tg3_write32_tx_mbox;
9663		tp->write32_rx_mbox = tg3_write_flush_reg32;
9664		tg3_flag_set(tp, MBOX_WRITE_REORDER);
9665		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9666	}
9667
9668	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9669	err = tg3_init_hw(tp, 1);
9670	if (err)
9671		goto out;
9672
9673	tg3_netif_start(tp);
9674
9675out:
9676	tg3_full_unlock(tp);
9677
9678	if (!err)
9679		tg3_phy_start(tp);
9680
9681	tg3_flag_clear(tp, RESET_TASK_PENDING);
9682}
9683
9684static int tg3_request_irq(struct tg3 *tp, int irq_num)
9685{
9686	irq_handler_t fn;
9687	unsigned long flags;
9688	char *name;
9689	struct tg3_napi *tnapi = &tp->napi[irq_num];
9690
9691	if (tp->irq_cnt == 1)
9692		name = tp->dev->name;
9693	else {
9694		name = &tnapi->irq_lbl[0];
9695		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9696		name[IFNAMSIZ-1] = 0;
9697	}
9698
9699	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9700		fn = tg3_msi;
9701		if (tg3_flag(tp, 1SHOT_MSI))
9702			fn = tg3_msi_1shot;
9703		flags = 0;
9704	} else {
9705		fn = tg3_interrupt;
9706		if (tg3_flag(tp, TAGGED_STATUS))
9707			fn = tg3_interrupt_tagged;
9708		flags = IRQF_SHARED;
9709	}
9710
9711	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9712}
9713
9714static int tg3_test_interrupt(struct tg3 *tp)
9715{
9716	struct tg3_napi *tnapi = &tp->napi[0];
9717	struct net_device *dev = tp->dev;
9718	int err, i, intr_ok = 0;
9719	u32 val;
9720
9721	if (!netif_running(dev))
9722		return -ENODEV;
9723
9724	tg3_disable_ints(tp);
9725
9726	free_irq(tnapi->irq_vec, tnapi);
9727
9728	/*
9729	 * Turn off MSI one shot mode.  Otherwise this test has no
9730	 * observable way to know whether the interrupt was delivered.
9731	 */
9732	if (tg3_flag(tp, 57765_PLUS)) {
9733		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9734		tw32(MSGINT_MODE, val);
9735	}
9736
9737	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9738			  IRQF_SHARED, dev->name, tnapi);
9739	if (err)
9740		return err;
9741
9742	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9743	tg3_enable_ints(tp);
9744
9745	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9746	       tnapi->coal_now);
9747
9748	for (i = 0; i < 5; i++) {
9749		u32 int_mbox, misc_host_ctrl;
9750
9751		int_mbox = tr32_mailbox(tnapi->int_mbox);
9752		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9753
9754		if ((int_mbox != 0) ||
9755		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9756			intr_ok = 1;
9757			break;
9758		}
9759
9760		if (tg3_flag(tp, 57765_PLUS) &&
9761		    tnapi->hw_status->status_tag != tnapi->last_tag)
9762			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9763
9764		msleep(10);
9765	}
9766
9767	tg3_disable_ints(tp);
9768
9769	free_irq(tnapi->irq_vec, tnapi);
9770
9771	err = tg3_request_irq(tp, 0);
9772
9773	if (err)
9774		return err;
9775
9776	if (intr_ok) {
9777		/* Reenable MSI one shot mode. */
9778		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9779			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9780			tw32(MSGINT_MODE, val);
9781		}
9782		return 0;
9783	}
9784
9785	return -EIO;
9786}
9787
9788/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9789 * successfully restored
9790 */
9791static int tg3_test_msi(struct tg3 *tp)
9792{
9793	int err;
9794	u16 pci_cmd;
9795
9796	if (!tg3_flag(tp, USING_MSI))
9797		return 0;
9798
9799	/* Turn off SERR reporting in case MSI terminates with Master
9800	 * Abort.
9801	 */
9802	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9803	pci_write_config_word(tp->pdev, PCI_COMMAND,
9804			      pci_cmd & ~PCI_COMMAND_SERR);
9805
9806	err = tg3_test_interrupt(tp);
9807
9808	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9809
9810	if (!err)
9811		return 0;
9812
9813	/* other failures */
9814	if (err != -EIO)
9815		return err;
9816
9817	/* MSI test failed, go back to INTx mode */
9818	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9819		    "to INTx mode. Please report this failure to the PCI "
9820		    "maintainer and include system chipset information\n");
9821
9822	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9823
9824	pci_disable_msi(tp->pdev);
9825
9826	tg3_flag_clear(tp, USING_MSI);
9827	tp->napi[0].irq_vec = tp->pdev->irq;
9828
9829	err = tg3_request_irq(tp, 0);
9830	if (err)
9831		return err;
9832
9833	/* Need to reset the chip because the MSI cycle may have terminated
9834	 * with Master Abort.
9835	 */
9836	tg3_full_lock(tp, 1);
9837
9838	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9839	err = tg3_init_hw(tp, 1);
9840
9841	tg3_full_unlock(tp);
9842
9843	if (err)
9844		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9845
9846	return err;
9847}
9848
9849static int tg3_request_firmware(struct tg3 *tp)
9850{
9851	const __be32 *fw_data;
9852
9853	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9854		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9855			   tp->fw_needed);
9856		return -ENOENT;
9857	}
9858
9859	fw_data = (void *)tp->fw->data;
9860
9861	/* Firmware blob starts with version numbers, followed by
9862	 * start address and _full_ length including BSS sections
9863	 * (which must be longer than the actual data, of course
9864	 */
9865
9866	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
9867	if (tp->fw_len < (tp->fw->size - 12)) {
9868		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9869			   tp->fw_len, tp->fw_needed);
9870		release_firmware(tp->fw);
9871		tp->fw = NULL;
9872		return -EINVAL;
9873	}
9874
9875	/* We no longer need firmware; we have it. */
9876	tp->fw_needed = NULL;
9877	return 0;
9878}
9879
9880static bool tg3_enable_msix(struct tg3 *tp)
9881{
9882	int i, rc;
9883	struct msix_entry msix_ent[tp->irq_max];
9884
9885	tp->irq_cnt = num_online_cpus();
9886	if (tp->irq_cnt > 1) {
9887		/* We want as many rx rings enabled as there are cpus.
9888		 * In multiqueue MSI-X mode, the first MSI-X vector
9889		 * only deals with link interrupts, etc, so we add
9890		 * one to the number of vectors we are requesting.
9891		 */
9892		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9893	}
9894
9895	for (i = 0; i < tp->irq_max; i++) {
9896		msix_ent[i].entry  = i;
9897		msix_ent[i].vector = 0;
9898	}
9899
9900	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9901	if (rc < 0) {
9902		return false;
9903	} else if (rc != 0) {
9904		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9905			return false;
9906		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9907			      tp->irq_cnt, rc);
9908		tp->irq_cnt = rc;
9909	}
9910
9911	for (i = 0; i < tp->irq_max; i++)
9912		tp->napi[i].irq_vec = msix_ent[i].vector;
9913
9914	netif_set_real_num_tx_queues(tp->dev, 1);
9915	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9916	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9917		pci_disable_msix(tp->pdev);
9918		return false;
9919	}
9920
9921	if (tp->irq_cnt > 1) {
9922		tg3_flag_set(tp, ENABLE_RSS);
9923
9924		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9925		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9926			tg3_flag_set(tp, ENABLE_TSS);
9927			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9928		}
9929	}
9930
9931	return true;
9932}
9933
9934static void tg3_ints_init(struct tg3 *tp)
9935{
9936	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9937	    !tg3_flag(tp, TAGGED_STATUS)) {
9938		/* All MSI supporting chips should support tagged
9939		 * status.  Assert that this is the case.
9940		 */
9941		netdev_warn(tp->dev,
9942			    "MSI without TAGGED_STATUS? Not using MSI\n");
9943		goto defcfg;
9944	}
9945
9946	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9947		tg3_flag_set(tp, USING_MSIX);
9948	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9949		tg3_flag_set(tp, USING_MSI);
9950
9951	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9952		u32 msi_mode = tr32(MSGINT_MODE);
9953		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9954			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9955		if (!tg3_flag(tp, 1SHOT_MSI))
9956			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9957		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9958	}
9959defcfg:
9960	if (!tg3_flag(tp, USING_MSIX)) {
9961		tp->irq_cnt = 1;
9962		tp->napi[0].irq_vec = tp->pdev->irq;
9963		netif_set_real_num_tx_queues(tp->dev, 1);
9964		netif_set_real_num_rx_queues(tp->dev, 1);
9965	}
9966}
9967
9968static void tg3_ints_fini(struct tg3 *tp)
9969{
9970	if (tg3_flag(tp, USING_MSIX))
9971		pci_disable_msix(tp->pdev);
9972	else if (tg3_flag(tp, USING_MSI))
9973		pci_disable_msi(tp->pdev);
9974	tg3_flag_clear(tp, USING_MSI);
9975	tg3_flag_clear(tp, USING_MSIX);
9976	tg3_flag_clear(tp, ENABLE_RSS);
9977	tg3_flag_clear(tp, ENABLE_TSS);
9978}
9979
9980static int tg3_open(struct net_device *dev)
9981{
9982	struct tg3 *tp = netdev_priv(dev);
9983	int i, err;
9984
9985	if (tp->fw_needed) {
9986		err = tg3_request_firmware(tp);
9987		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9988			if (err)
9989				return err;
9990		} else if (err) {
9991			netdev_warn(tp->dev, "TSO capability disabled\n");
9992			tg3_flag_clear(tp, TSO_CAPABLE);
9993		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
9994			netdev_notice(tp->dev, "TSO capability restored\n");
9995			tg3_flag_set(tp, TSO_CAPABLE);
9996		}
9997	}
9998
9999	netif_carrier_off(tp->dev);
10000
10001	err = tg3_power_up(tp);
10002	if (err)
10003		return err;
10004
10005	tg3_full_lock(tp, 0);
10006
10007	tg3_disable_ints(tp);
10008	tg3_flag_clear(tp, INIT_COMPLETE);
10009
10010	tg3_full_unlock(tp);
10011
10012	/*
10013	 * Setup interrupts first so we know how
10014	 * many NAPI resources to allocate
10015	 */
10016	tg3_ints_init(tp);
10017
10018	tg3_rss_check_indir_tbl(tp);
10019
10020	/* The placement of this call is tied
10021	 * to the setup and use of Host TX descriptors.
10022	 */
10023	err = tg3_alloc_consistent(tp);
10024	if (err)
10025		goto err_out1;
10026
10027	tg3_napi_init(tp);
10028
10029	tg3_napi_enable(tp);
10030
10031	for (i = 0; i < tp->irq_cnt; i++) {
10032		struct tg3_napi *tnapi = &tp->napi[i];
10033		err = tg3_request_irq(tp, i);
10034		if (err) {
10035			for (i--; i >= 0; i--) {
10036				tnapi = &tp->napi[i];
10037				free_irq(tnapi->irq_vec, tnapi);
10038			}
10039			goto err_out2;
10040		}
10041	}
10042
10043	tg3_full_lock(tp, 0);
10044
10045	err = tg3_init_hw(tp, 1);
10046	if (err) {
10047		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10048		tg3_free_rings(tp);
10049	}
10050
10051	tg3_full_unlock(tp);
10052
10053	if (err)
10054		goto err_out3;
10055
10056	if (tg3_flag(tp, USING_MSI)) {
10057		err = tg3_test_msi(tp);
10058
10059		if (err) {
10060			tg3_full_lock(tp, 0);
10061			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10062			tg3_free_rings(tp);
10063			tg3_full_unlock(tp);
10064
10065			goto err_out2;
10066		}
10067
10068		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10069			u32 val = tr32(PCIE_TRANSACTION_CFG);
10070
10071			tw32(PCIE_TRANSACTION_CFG,
10072			     val | PCIE_TRANS_CFG_1SHOT_MSI);
10073		}
10074	}
10075
10076	tg3_phy_start(tp);
10077
10078	tg3_full_lock(tp, 0);
10079
10080	tg3_timer_start(tp);
10081	tg3_flag_set(tp, INIT_COMPLETE);
10082	tg3_enable_ints(tp);
10083
10084	tg3_full_unlock(tp);
10085
10086	netif_tx_start_all_queues(dev);
10087
10088	/*
10089	 * Reset loopback feature if it was turned on while the device was down
10090	 * make sure that it's installed properly now.
10091	 */
10092	if (dev->features & NETIF_F_LOOPBACK)
10093		tg3_set_loopback(dev, dev->features);
10094
10095	return 0;
10096
10097err_out3:
10098	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10099		struct tg3_napi *tnapi = &tp->napi[i];
10100		free_irq(tnapi->irq_vec, tnapi);
10101	}
10102
10103err_out2:
10104	tg3_napi_disable(tp);
10105	tg3_napi_fini(tp);
10106	tg3_free_consistent(tp);
10107
10108err_out1:
10109	tg3_ints_fini(tp);
10110	tg3_frob_aux_power(tp, false);
10111	pci_set_power_state(tp->pdev, PCI_D3hot);
10112	return err;
10113}
10114
10115static int tg3_close(struct net_device *dev)
10116{
10117	int i;
10118	struct tg3 *tp = netdev_priv(dev);
10119
10120	tg3_napi_disable(tp);
10121	tg3_reset_task_cancel(tp);
10122
10123	netif_tx_stop_all_queues(dev);
10124
10125	tg3_timer_stop(tp);
10126
10127	tg3_phy_stop(tp);
10128
10129	tg3_full_lock(tp, 1);
10130
10131	tg3_disable_ints(tp);
10132
10133	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10134	tg3_free_rings(tp);
10135	tg3_flag_clear(tp, INIT_COMPLETE);
10136
10137	tg3_full_unlock(tp);
10138
10139	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10140		struct tg3_napi *tnapi = &tp->napi[i];
10141		free_irq(tnapi->irq_vec, tnapi);
10142	}
10143
10144	tg3_ints_fini(tp);
10145
10146	/* Clear stats across close / open calls */
10147	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10148	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10149
10150	tg3_napi_fini(tp);
10151
10152	tg3_free_consistent(tp);
10153
10154	tg3_power_down(tp);
10155
10156	netif_carrier_off(tp->dev);
10157
10158	return 0;
10159}
10160
10161static inline u64 get_stat64(tg3_stat64_t *val)
10162{
10163       return ((u64)val->high << 32) | ((u64)val->low);
10164}
10165
10166static u64 tg3_calc_crc_errors(struct tg3 *tp)
10167{
10168	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10169
10170	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10171	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10172	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10173		u32 val;
10174
10175		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10176			tg3_writephy(tp, MII_TG3_TEST1,
10177				     val | MII_TG3_TEST1_CRC_EN);
10178			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10179		} else
10180			val = 0;
10181
10182		tp->phy_crc_errors += val;
10183
10184		return tp->phy_crc_errors;
10185	}
10186
10187	return get_stat64(&hw_stats->rx_fcs_errors);
10188}
10189
10190#define ESTAT_ADD(member) \
10191	estats->member =	old_estats->member + \
10192				get_stat64(&hw_stats->member)
10193
10194static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10195{
10196	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10197	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10198
10199	ESTAT_ADD(rx_octets);
10200	ESTAT_ADD(rx_fragments);
10201	ESTAT_ADD(rx_ucast_packets);
10202	ESTAT_ADD(rx_mcast_packets);
10203	ESTAT_ADD(rx_bcast_packets);
10204	ESTAT_ADD(rx_fcs_errors);
10205	ESTAT_ADD(rx_align_errors);
10206	ESTAT_ADD(rx_xon_pause_rcvd);
10207	ESTAT_ADD(rx_xoff_pause_rcvd);
10208	ESTAT_ADD(rx_mac_ctrl_rcvd);
10209	ESTAT_ADD(rx_xoff_entered);
10210	ESTAT_ADD(rx_frame_too_long_errors);
10211	ESTAT_ADD(rx_jabbers);
10212	ESTAT_ADD(rx_undersize_packets);
10213	ESTAT_ADD(rx_in_length_errors);
10214	ESTAT_ADD(rx_out_length_errors);
10215	ESTAT_ADD(rx_64_or_less_octet_packets);
10216	ESTAT_ADD(rx_65_to_127_octet_packets);
10217	ESTAT_ADD(rx_128_to_255_octet_packets);
10218	ESTAT_ADD(rx_256_to_511_octet_packets);
10219	ESTAT_ADD(rx_512_to_1023_octet_packets);
10220	ESTAT_ADD(rx_1024_to_1522_octet_packets);
10221	ESTAT_ADD(rx_1523_to_2047_octet_packets);
10222	ESTAT_ADD(rx_2048_to_4095_octet_packets);
10223	ESTAT_ADD(rx_4096_to_8191_octet_packets);
10224	ESTAT_ADD(rx_8192_to_9022_octet_packets);
10225
10226	ESTAT_ADD(tx_octets);
10227	ESTAT_ADD(tx_collisions);
10228	ESTAT_ADD(tx_xon_sent);
10229	ESTAT_ADD(tx_xoff_sent);
10230	ESTAT_ADD(tx_flow_control);
10231	ESTAT_ADD(tx_mac_errors);
10232	ESTAT_ADD(tx_single_collisions);
10233	ESTAT_ADD(tx_mult_collisions);
10234	ESTAT_ADD(tx_deferred);
10235	ESTAT_ADD(tx_excessive_collisions);
10236	ESTAT_ADD(tx_late_collisions);
10237	ESTAT_ADD(tx_collide_2times);
10238	ESTAT_ADD(tx_collide_3times);
10239	ESTAT_ADD(tx_collide_4times);
10240	ESTAT_ADD(tx_collide_5times);
10241	ESTAT_ADD(tx_collide_6times);
10242	ESTAT_ADD(tx_collide_7times);
10243	ESTAT_ADD(tx_collide_8times);
10244	ESTAT_ADD(tx_collide_9times);
10245	ESTAT_ADD(tx_collide_10times);
10246	ESTAT_ADD(tx_collide_11times);
10247	ESTAT_ADD(tx_collide_12times);
10248	ESTAT_ADD(tx_collide_13times);
10249	ESTAT_ADD(tx_collide_14times);
10250	ESTAT_ADD(tx_collide_15times);
10251	ESTAT_ADD(tx_ucast_packets);
10252	ESTAT_ADD(tx_mcast_packets);
10253	ESTAT_ADD(tx_bcast_packets);
10254	ESTAT_ADD(tx_carrier_sense_errors);
10255	ESTAT_ADD(tx_discards);
10256	ESTAT_ADD(tx_errors);
10257
10258	ESTAT_ADD(dma_writeq_full);
10259	ESTAT_ADD(dma_write_prioq_full);
10260	ESTAT_ADD(rxbds_empty);
10261	ESTAT_ADD(rx_discards);
10262	ESTAT_ADD(rx_errors);
10263	ESTAT_ADD(rx_threshold_hit);
10264
10265	ESTAT_ADD(dma_readq_full);
10266	ESTAT_ADD(dma_read_prioq_full);
10267	ESTAT_ADD(tx_comp_queue_full);
10268
10269	ESTAT_ADD(ring_set_send_prod_index);
10270	ESTAT_ADD(ring_status_update);
10271	ESTAT_ADD(nic_irqs);
10272	ESTAT_ADD(nic_avoided_irqs);
10273	ESTAT_ADD(nic_tx_threshold_hit);
10274
10275	ESTAT_ADD(mbuf_lwm_thresh_hit);
10276}
10277
10278static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10279{
10280	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10281	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10282
10283	stats->rx_packets = old_stats->rx_packets +
10284		get_stat64(&hw_stats->rx_ucast_packets) +
10285		get_stat64(&hw_stats->rx_mcast_packets) +
10286		get_stat64(&hw_stats->rx_bcast_packets);
10287
10288	stats->tx_packets = old_stats->tx_packets +
10289		get_stat64(&hw_stats->tx_ucast_packets) +
10290		get_stat64(&hw_stats->tx_mcast_packets) +
10291		get_stat64(&hw_stats->tx_bcast_packets);
10292
10293	stats->rx_bytes = old_stats->rx_bytes +
10294		get_stat64(&hw_stats->rx_octets);
10295	stats->tx_bytes = old_stats->tx_bytes +
10296		get_stat64(&hw_stats->tx_octets);
10297
10298	stats->rx_errors = old_stats->rx_errors +
10299		get_stat64(&hw_stats->rx_errors);
10300	stats->tx_errors = old_stats->tx_errors +
10301		get_stat64(&hw_stats->tx_errors) +
10302		get_stat64(&hw_stats->tx_mac_errors) +
10303		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10304		get_stat64(&hw_stats->tx_discards);
10305
10306	stats->multicast = old_stats->multicast +
10307		get_stat64(&hw_stats->rx_mcast_packets);
10308	stats->collisions = old_stats->collisions +
10309		get_stat64(&hw_stats->tx_collisions);
10310
10311	stats->rx_length_errors = old_stats->rx_length_errors +
10312		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10313		get_stat64(&hw_stats->rx_undersize_packets);
10314
10315	stats->rx_over_errors = old_stats->rx_over_errors +
10316		get_stat64(&hw_stats->rxbds_empty);
10317	stats->rx_frame_errors = old_stats->rx_frame_errors +
10318		get_stat64(&hw_stats->rx_align_errors);
10319	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10320		get_stat64(&hw_stats->tx_discards);
10321	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10322		get_stat64(&hw_stats->tx_carrier_sense_errors);
10323
10324	stats->rx_crc_errors = old_stats->rx_crc_errors +
10325		tg3_calc_crc_errors(tp);
10326
10327	stats->rx_missed_errors = old_stats->rx_missed_errors +
10328		get_stat64(&hw_stats->rx_discards);
10329
10330	stats->rx_dropped = tp->rx_dropped;
10331	stats->tx_dropped = tp->tx_dropped;
10332}
10333
10334static int tg3_get_regs_len(struct net_device *dev)
10335{
10336	return TG3_REG_BLK_SIZE;
10337}
10338
10339static void tg3_get_regs(struct net_device *dev,
10340		struct ethtool_regs *regs, void *_p)
10341{
10342	struct tg3 *tp = netdev_priv(dev);
10343
10344	regs->version = 0;
10345
10346	memset(_p, 0, TG3_REG_BLK_SIZE);
10347
10348	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10349		return;
10350
10351	tg3_full_lock(tp, 0);
10352
10353	tg3_dump_legacy_regs(tp, (u32 *)_p);
10354
10355	tg3_full_unlock(tp);
10356}
10357
10358static int tg3_get_eeprom_len(struct net_device *dev)
10359{
10360	struct tg3 *tp = netdev_priv(dev);
10361
10362	return tp->nvram_size;
10363}
10364
10365static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10366{
10367	struct tg3 *tp = netdev_priv(dev);
10368	int ret;
10369	u8  *pd;
10370	u32 i, offset, len, b_offset, b_count;
10371	__be32 val;
10372
10373	if (tg3_flag(tp, NO_NVRAM))
10374		return -EINVAL;
10375
10376	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10377		return -EAGAIN;
10378
10379	offset = eeprom->offset;
10380	len = eeprom->len;
10381	eeprom->len = 0;
10382
10383	eeprom->magic = TG3_EEPROM_MAGIC;
10384
10385	if (offset & 3) {
10386		/* adjustments to start on required 4 byte boundary */
10387		b_offset = offset & 3;
10388		b_count = 4 - b_offset;
10389		if (b_count > len) {
10390			/* i.e. offset=1 len=2 */
10391			b_count = len;
10392		}
10393		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10394		if (ret)
10395			return ret;
10396		memcpy(data, ((char *)&val) + b_offset, b_count);
10397		len -= b_count;
10398		offset += b_count;
10399		eeprom->len += b_count;
10400	}
10401
10402	/* read bytes up to the last 4 byte boundary */
10403	pd = &data[eeprom->len];
10404	for (i = 0; i < (len - (len & 3)); i += 4) {
10405		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10406		if (ret) {
10407			eeprom->len += i;
10408			return ret;
10409		}
10410		memcpy(pd + i, &val, 4);
10411	}
10412	eeprom->len += i;
10413
10414	if (len & 3) {
10415		/* read last bytes not ending on 4 byte boundary */
10416		pd = &data[eeprom->len];
10417		b_count = len & 3;
10418		b_offset = offset + len - b_count;
10419		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10420		if (ret)
10421			return ret;
10422		memcpy(pd, &val, b_count);
10423		eeprom->len += b_count;
10424	}
10425	return 0;
10426}
10427
10428static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10429{
10430	struct tg3 *tp = netdev_priv(dev);
10431	int ret;
10432	u32 offset, len, b_offset, odd_len;
10433	u8 *buf;
10434	__be32 start, end;
10435
10436	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10437		return -EAGAIN;
10438
10439	if (tg3_flag(tp, NO_NVRAM) ||
10440	    eeprom->magic != TG3_EEPROM_MAGIC)
10441		return -EINVAL;
10442
10443	offset = eeprom->offset;
10444	len = eeprom->len;
10445
10446	if ((b_offset = (offset & 3))) {
10447		/* adjustments to start on required 4 byte boundary */
10448		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10449		if (ret)
10450			return ret;
10451		len += b_offset;
10452		offset &= ~3;
10453		if (len < 4)
10454			len = 4;
10455	}
10456
10457	odd_len = 0;
10458	if (len & 3) {
10459		/* adjustments to end on required 4 byte boundary */
10460		odd_len = 1;
10461		len = (len + 3) & ~3;
10462		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10463		if (ret)
10464			return ret;
10465	}
10466
10467	buf = data;
10468	if (b_offset || odd_len) {
10469		buf = kmalloc(len, GFP_KERNEL);
10470		if (!buf)
10471			return -ENOMEM;
10472		if (b_offset)
10473			memcpy(buf, &start, 4);
10474		if (odd_len)
10475			memcpy(buf+len-4, &end, 4);
10476		memcpy(buf + b_offset, data, eeprom->len);
10477	}
10478
10479	ret = tg3_nvram_write_block(tp, offset, len, buf);
10480
10481	if (buf != data)
10482		kfree(buf);
10483
10484	return ret;
10485}
10486
10487static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10488{
10489	struct tg3 *tp = netdev_priv(dev);
10490
10491	if (tg3_flag(tp, USE_PHYLIB)) {
10492		struct phy_device *phydev;
10493		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10494			return -EAGAIN;
10495		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10496		return phy_ethtool_gset(phydev, cmd);
10497	}
10498
10499	cmd->supported = (SUPPORTED_Autoneg);
10500
10501	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10502		cmd->supported |= (SUPPORTED_1000baseT_Half |
10503				   SUPPORTED_1000baseT_Full);
10504
10505	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10506		cmd->supported |= (SUPPORTED_100baseT_Half |
10507				  SUPPORTED_100baseT_Full |
10508				  SUPPORTED_10baseT_Half |
10509				  SUPPORTED_10baseT_Full |
10510				  SUPPORTED_TP);
10511		cmd->port = PORT_TP;
10512	} else {
10513		cmd->supported |= SUPPORTED_FIBRE;
10514		cmd->port = PORT_FIBRE;
10515	}
10516
10517	cmd->advertising = tp->link_config.advertising;
10518	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10519		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10520			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10521				cmd->advertising |= ADVERTISED_Pause;
10522			} else {
10523				cmd->advertising |= ADVERTISED_Pause |
10524						    ADVERTISED_Asym_Pause;
10525			}
10526		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10527			cmd->advertising |= ADVERTISED_Asym_Pause;
10528		}
10529	}
10530	if (netif_running(dev) && netif_carrier_ok(dev)) {
10531		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10532		cmd->duplex = tp->link_config.active_duplex;
10533		cmd->lp_advertising = tp->link_config.rmt_adv;
10534		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10535			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10536				cmd->eth_tp_mdix = ETH_TP_MDI_X;
10537			else
10538				cmd->eth_tp_mdix = ETH_TP_MDI;
10539		}
10540	} else {
10541		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10542		cmd->duplex = DUPLEX_UNKNOWN;
10543		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10544	}
10545	cmd->phy_address = tp->phy_addr;
10546	cmd->transceiver = XCVR_INTERNAL;
10547	cmd->autoneg = tp->link_config.autoneg;
10548	cmd->maxtxpkt = 0;
10549	cmd->maxrxpkt = 0;
10550	return 0;
10551}
10552
10553static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10554{
10555	struct tg3 *tp = netdev_priv(dev);
10556	u32 speed = ethtool_cmd_speed(cmd);
10557
10558	if (tg3_flag(tp, USE_PHYLIB)) {
10559		struct phy_device *phydev;
10560		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10561			return -EAGAIN;
10562		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10563		return phy_ethtool_sset(phydev, cmd);
10564	}
10565
10566	if (cmd->autoneg != AUTONEG_ENABLE &&
10567	    cmd->autoneg != AUTONEG_DISABLE)
10568		return -EINVAL;
10569
10570	if (cmd->autoneg == AUTONEG_DISABLE &&
10571	    cmd->duplex != DUPLEX_FULL &&
10572	    cmd->duplex != DUPLEX_HALF)
10573		return -EINVAL;
10574
10575	if (cmd->autoneg == AUTONEG_ENABLE) {
10576		u32 mask = ADVERTISED_Autoneg |
10577			   ADVERTISED_Pause |
10578			   ADVERTISED_Asym_Pause;
10579
10580		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10581			mask |= ADVERTISED_1000baseT_Half |
10582				ADVERTISED_1000baseT_Full;
10583
10584		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10585			mask |= ADVERTISED_100baseT_Half |
10586				ADVERTISED_100baseT_Full |
10587				ADVERTISED_10baseT_Half |
10588				ADVERTISED_10baseT_Full |
10589				ADVERTISED_TP;
10590		else
10591			mask |= ADVERTISED_FIBRE;
10592
10593		if (cmd->advertising & ~mask)
10594			return -EINVAL;
10595
10596		mask &= (ADVERTISED_1000baseT_Half |
10597			 ADVERTISED_1000baseT_Full |
10598			 ADVERTISED_100baseT_Half |
10599			 ADVERTISED_100baseT_Full |
10600			 ADVERTISED_10baseT_Half |
10601			 ADVERTISED_10baseT_Full);
10602
10603		cmd->advertising &= mask;
10604	} else {
10605		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10606			if (speed != SPEED_1000)
10607				return -EINVAL;
10608
10609			if (cmd->duplex != DUPLEX_FULL)
10610				return -EINVAL;
10611		} else {
10612			if (speed != SPEED_100 &&
10613			    speed != SPEED_10)
10614				return -EINVAL;
10615		}
10616	}
10617
10618	tg3_full_lock(tp, 0);
10619
10620	tp->link_config.autoneg = cmd->autoneg;
10621	if (cmd->autoneg == AUTONEG_ENABLE) {
10622		tp->link_config.advertising = (cmd->advertising |
10623					      ADVERTISED_Autoneg);
10624		tp->link_config.speed = SPEED_UNKNOWN;
10625		tp->link_config.duplex = DUPLEX_UNKNOWN;
10626	} else {
10627		tp->link_config.advertising = 0;
10628		tp->link_config.speed = speed;
10629		tp->link_config.duplex = cmd->duplex;
10630	}
10631
10632	if (netif_running(dev))
10633		tg3_setup_phy(tp, 1);
10634
10635	tg3_full_unlock(tp);
10636
10637	return 0;
10638}
10639
10640static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10641{
10642	struct tg3 *tp = netdev_priv(dev);
10643
10644	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10645	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10646	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10647	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10648}
10649
10650static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10651{
10652	struct tg3 *tp = netdev_priv(dev);
10653
10654	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10655		wol->supported = WAKE_MAGIC;
10656	else
10657		wol->supported = 0;
10658	wol->wolopts = 0;
10659	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10660		wol->wolopts = WAKE_MAGIC;
10661	memset(&wol->sopass, 0, sizeof(wol->sopass));
10662}
10663
10664static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10665{
10666	struct tg3 *tp = netdev_priv(dev);
10667	struct device *dp = &tp->pdev->dev;
10668
10669	if (wol->wolopts & ~WAKE_MAGIC)
10670		return -EINVAL;
10671	if ((wol->wolopts & WAKE_MAGIC) &&
10672	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10673		return -EINVAL;
10674
10675	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10676
10677	spin_lock_bh(&tp->lock);
10678	if (device_may_wakeup(dp))
10679		tg3_flag_set(tp, WOL_ENABLE);
10680	else
10681		tg3_flag_clear(tp, WOL_ENABLE);
10682	spin_unlock_bh(&tp->lock);
10683
10684	return 0;
10685}
10686
10687static u32 tg3_get_msglevel(struct net_device *dev)
10688{
10689	struct tg3 *tp = netdev_priv(dev);
10690	return tp->msg_enable;
10691}
10692
10693static void tg3_set_msglevel(struct net_device *dev, u32 value)
10694{
10695	struct tg3 *tp = netdev_priv(dev);
10696	tp->msg_enable = value;
10697}
10698
10699static int tg3_nway_reset(struct net_device *dev)
10700{
10701	struct tg3 *tp = netdev_priv(dev);
10702	int r;
10703
10704	if (!netif_running(dev))
10705		return -EAGAIN;
10706
10707	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10708		return -EINVAL;
10709
10710	if (tg3_flag(tp, USE_PHYLIB)) {
10711		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10712			return -EAGAIN;
10713		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10714	} else {
10715		u32 bmcr;
10716
10717		spin_lock_bh(&tp->lock);
10718		r = -EINVAL;
10719		tg3_readphy(tp, MII_BMCR, &bmcr);
10720		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10721		    ((bmcr & BMCR_ANENABLE) ||
10722		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10723			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10724						   BMCR_ANENABLE);
10725			r = 0;
10726		}
10727		spin_unlock_bh(&tp->lock);
10728	}
10729
10730	return r;
10731}
10732
10733static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10734{
10735	struct tg3 *tp = netdev_priv(dev);
10736
10737	ering->rx_max_pending = tp->rx_std_ring_mask;
10738	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10739		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10740	else
10741		ering->rx_jumbo_max_pending = 0;
10742
10743	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10744
10745	ering->rx_pending = tp->rx_pending;
10746	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10747		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10748	else
10749		ering->rx_jumbo_pending = 0;
10750
10751	ering->tx_pending = tp->napi[0].tx_pending;
10752}
10753
10754static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10755{
10756	struct tg3 *tp = netdev_priv(dev);
10757	int i, irq_sync = 0, err = 0;
10758
10759	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10760	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10761	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10762	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10763	    (tg3_flag(tp, TSO_BUG) &&
10764	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10765		return -EINVAL;
10766
10767	if (netif_running(dev)) {
10768		tg3_phy_stop(tp);
10769		tg3_netif_stop(tp);
10770		irq_sync = 1;
10771	}
10772
10773	tg3_full_lock(tp, irq_sync);
10774
10775	tp->rx_pending = ering->rx_pending;
10776
10777	if (tg3_flag(tp, MAX_RXPEND_64) &&
10778	    tp->rx_pending > 63)
10779		tp->rx_pending = 63;
10780	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10781
10782	for (i = 0; i < tp->irq_max; i++)
10783		tp->napi[i].tx_pending = ering->tx_pending;
10784
10785	if (netif_running(dev)) {
10786		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10787		err = tg3_restart_hw(tp, 1);
10788		if (!err)
10789			tg3_netif_start(tp);
10790	}
10791
10792	tg3_full_unlock(tp);
10793
10794	if (irq_sync && !err)
10795		tg3_phy_start(tp);
10796
10797	return err;
10798}
10799
10800static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10801{
10802	struct tg3 *tp = netdev_priv(dev);
10803
10804	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10805
10806	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10807		epause->rx_pause = 1;
10808	else
10809		epause->rx_pause = 0;
10810
10811	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10812		epause->tx_pause = 1;
10813	else
10814		epause->tx_pause = 0;
10815}
10816
10817static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10818{
10819	struct tg3 *tp = netdev_priv(dev);
10820	int err = 0;
10821
10822	if (tg3_flag(tp, USE_PHYLIB)) {
10823		u32 newadv;
10824		struct phy_device *phydev;
10825
10826		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10827
10828		if (!(phydev->supported & SUPPORTED_Pause) ||
10829		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10830		     (epause->rx_pause != epause->tx_pause)))
10831			return -EINVAL;
10832
10833		tp->link_config.flowctrl = 0;
10834		if (epause->rx_pause) {
10835			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10836
10837			if (epause->tx_pause) {
10838				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10839				newadv = ADVERTISED_Pause;
10840			} else
10841				newadv = ADVERTISED_Pause |
10842					 ADVERTISED_Asym_Pause;
10843		} else if (epause->tx_pause) {
10844			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10845			newadv = ADVERTISED_Asym_Pause;
10846		} else
10847			newadv = 0;
10848
10849		if (epause->autoneg)
10850			tg3_flag_set(tp, PAUSE_AUTONEG);
10851		else
10852			tg3_flag_clear(tp, PAUSE_AUTONEG);
10853
10854		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10855			u32 oldadv = phydev->advertising &
10856				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10857			if (oldadv != newadv) {
10858				phydev->advertising &=
10859					~(ADVERTISED_Pause |
10860					  ADVERTISED_Asym_Pause);
10861				phydev->advertising |= newadv;
10862				if (phydev->autoneg) {
10863					/*
10864					 * Always renegotiate the link to
10865					 * inform our link partner of our
10866					 * flow control settings, even if the
10867					 * flow control is forced.  Let
10868					 * tg3_adjust_link() do the final
10869					 * flow control setup.
10870					 */
10871					return phy_start_aneg(phydev);
10872				}
10873			}
10874
10875			if (!epause->autoneg)
10876				tg3_setup_flow_control(tp, 0, 0);
10877		} else {
10878			tp->link_config.advertising &=
10879					~(ADVERTISED_Pause |
10880					  ADVERTISED_Asym_Pause);
10881			tp->link_config.advertising |= newadv;
10882		}
10883	} else {
10884		int irq_sync = 0;
10885
10886		if (netif_running(dev)) {
10887			tg3_netif_stop(tp);
10888			irq_sync = 1;
10889		}
10890
10891		tg3_full_lock(tp, irq_sync);
10892
10893		if (epause->autoneg)
10894			tg3_flag_set(tp, PAUSE_AUTONEG);
10895		else
10896			tg3_flag_clear(tp, PAUSE_AUTONEG);
10897		if (epause->rx_pause)
10898			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10899		else
10900			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10901		if (epause->tx_pause)
10902			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10903		else
10904			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10905
10906		if (netif_running(dev)) {
10907			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10908			err = tg3_restart_hw(tp, 1);
10909			if (!err)
10910				tg3_netif_start(tp);
10911		}
10912
10913		tg3_full_unlock(tp);
10914	}
10915
10916	return err;
10917}
10918
10919static int tg3_get_sset_count(struct net_device *dev, int sset)
10920{
10921	switch (sset) {
10922	case ETH_SS_TEST:
10923		return TG3_NUM_TEST;
10924	case ETH_SS_STATS:
10925		return TG3_NUM_STATS;
10926	default:
10927		return -EOPNOTSUPP;
10928	}
10929}
10930
10931static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10932			 u32 *rules __always_unused)
10933{
10934	struct tg3 *tp = netdev_priv(dev);
10935
10936	if (!tg3_flag(tp, SUPPORT_MSIX))
10937		return -EOPNOTSUPP;
10938
10939	switch (info->cmd) {
10940	case ETHTOOL_GRXRINGS:
10941		if (netif_running(tp->dev))
10942			info->data = tp->irq_cnt;
10943		else {
10944			info->data = num_online_cpus();
10945			if (info->data > TG3_IRQ_MAX_VECS_RSS)
10946				info->data = TG3_IRQ_MAX_VECS_RSS;
10947		}
10948
10949		/* The first interrupt vector only
10950		 * handles link interrupts.
10951		 */
10952		info->data -= 1;
10953		return 0;
10954
10955	default:
10956		return -EOPNOTSUPP;
10957	}
10958}
10959
10960static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10961{
10962	u32 size = 0;
10963	struct tg3 *tp = netdev_priv(dev);
10964
10965	if (tg3_flag(tp, SUPPORT_MSIX))
10966		size = TG3_RSS_INDIR_TBL_SIZE;
10967
10968	return size;
10969}
10970
10971static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10972{
10973	struct tg3 *tp = netdev_priv(dev);
10974	int i;
10975
10976	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10977		indir[i] = tp->rss_ind_tbl[i];
10978
10979	return 0;
10980}
10981
10982static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10983{
10984	struct tg3 *tp = netdev_priv(dev);
10985	size_t i;
10986
10987	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10988		tp->rss_ind_tbl[i] = indir[i];
10989
10990	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10991		return 0;
10992
10993	/* It is legal to write the indirection
10994	 * table while the device is running.
10995	 */
10996	tg3_full_lock(tp, 0);
10997	tg3_rss_write_indir_tbl(tp);
10998	tg3_full_unlock(tp);
10999
11000	return 0;
11001}
11002
11003static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11004{
11005	switch (stringset) {
11006	case ETH_SS_STATS:
11007		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11008		break;
11009	case ETH_SS_TEST:
11010		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11011		break;
11012	default:
11013		WARN_ON(1);	/* we need a WARN() */
11014		break;
11015	}
11016}
11017
11018static int tg3_set_phys_id(struct net_device *dev,
11019			    enum ethtool_phys_id_state state)
11020{
11021	struct tg3 *tp = netdev_priv(dev);
11022
11023	if (!netif_running(tp->dev))
11024		return -EAGAIN;
11025
11026	switch (state) {
11027	case ETHTOOL_ID_ACTIVE:
11028		return 1;	/* cycle on/off once per second */
11029
11030	case ETHTOOL_ID_ON:
11031		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11032		     LED_CTRL_1000MBPS_ON |
11033		     LED_CTRL_100MBPS_ON |
11034		     LED_CTRL_10MBPS_ON |
11035		     LED_CTRL_TRAFFIC_OVERRIDE |
11036		     LED_CTRL_TRAFFIC_BLINK |
11037		     LED_CTRL_TRAFFIC_LED);
11038		break;
11039
11040	case ETHTOOL_ID_OFF:
11041		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11042		     LED_CTRL_TRAFFIC_OVERRIDE);
11043		break;
11044
11045	case ETHTOOL_ID_INACTIVE:
11046		tw32(MAC_LED_CTRL, tp->led_ctrl);
11047		break;
11048	}
11049
11050	return 0;
11051}
11052
11053static void tg3_get_ethtool_stats(struct net_device *dev,
11054				   struct ethtool_stats *estats, u64 *tmp_stats)
11055{
11056	struct tg3 *tp = netdev_priv(dev);
11057
11058	if (tp->hw_stats)
11059		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11060	else
11061		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11062}
11063
11064static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11065{
11066	int i;
11067	__be32 *buf;
11068	u32 offset = 0, len = 0;
11069	u32 magic, val;
11070
11071	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11072		return NULL;
11073
11074	if (magic == TG3_EEPROM_MAGIC) {
11075		for (offset = TG3_NVM_DIR_START;
11076		     offset < TG3_NVM_DIR_END;
11077		     offset += TG3_NVM_DIRENT_SIZE) {
11078			if (tg3_nvram_read(tp, offset, &val))
11079				return NULL;
11080
11081			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11082			    TG3_NVM_DIRTYPE_EXTVPD)
11083				break;
11084		}
11085
11086		if (offset != TG3_NVM_DIR_END) {
11087			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11088			if (tg3_nvram_read(tp, offset + 4, &offset))
11089				return NULL;
11090
11091			offset = tg3_nvram_logical_addr(tp, offset);
11092		}
11093	}
11094
11095	if (!offset || !len) {
11096		offset = TG3_NVM_VPD_OFF;
11097		len = TG3_NVM_VPD_LEN;
11098	}
11099
11100	buf = kmalloc(len, GFP_KERNEL);
11101	if (buf == NULL)
11102		return NULL;
11103
11104	if (magic == TG3_EEPROM_MAGIC) {
11105		for (i = 0; i < len; i += 4) {
11106			/* The data is in little-endian format in NVRAM.
11107			 * Use the big-endian read routines to preserve
11108			 * the byte order as it exists in NVRAM.
11109			 */
11110			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11111				goto error;
11112		}
11113	} else {
11114		u8 *ptr;
11115		ssize_t cnt;
11116		unsigned int pos = 0;
11117
11118		ptr = (u8 *)&buf[0];
11119		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11120			cnt = pci_read_vpd(tp->pdev, pos,
11121					   len - pos, ptr);
11122			if (cnt == -ETIMEDOUT || cnt == -EINTR)
11123				cnt = 0;
11124			else if (cnt < 0)
11125				goto error;
11126		}
11127		if (pos != len)
11128			goto error;
11129	}
11130
11131	*vpdlen = len;
11132
11133	return buf;
11134
11135error:
11136	kfree(buf);
11137	return NULL;
11138}
11139
11140#define NVRAM_TEST_SIZE 0x100
11141#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
11142#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
11143#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
11144#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
11145#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
11146#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
11147#define NVRAM_SELFBOOT_HW_SIZE 0x20
11148#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11149
11150static int tg3_test_nvram(struct tg3 *tp)
11151{
11152	u32 csum, magic, len;
11153	__be32 *buf;
11154	int i, j, k, err = 0, size;
11155
11156	if (tg3_flag(tp, NO_NVRAM))
11157		return 0;
11158
11159	if (tg3_nvram_read(tp, 0, &magic) != 0)
11160		return -EIO;
11161
11162	if (magic == TG3_EEPROM_MAGIC)
11163		size = NVRAM_TEST_SIZE;
11164	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11165		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11166		    TG3_EEPROM_SB_FORMAT_1) {
11167			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11168			case TG3_EEPROM_SB_REVISION_0:
11169				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11170				break;
11171			case TG3_EEPROM_SB_REVISION_2:
11172				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11173				break;
11174			case TG3_EEPROM_SB_REVISION_3:
11175				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11176				break;
11177			case TG3_EEPROM_SB_REVISION_4:
11178				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11179				break;
11180			case TG3_EEPROM_SB_REVISION_5:
11181				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11182				break;
11183			case TG3_EEPROM_SB_REVISION_6:
11184				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11185				break;
11186			default:
11187				return -EIO;
11188			}
11189		} else
11190			return 0;
11191	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11192		size = NVRAM_SELFBOOT_HW_SIZE;
11193	else
11194		return -EIO;
11195
11196	buf = kmalloc(size, GFP_KERNEL);
11197	if (buf == NULL)
11198		return -ENOMEM;
11199
11200	err = -EIO;
11201	for (i = 0, j = 0; i < size; i += 4, j++) {
11202		err = tg3_nvram_read_be32(tp, i, &buf[j]);
11203		if (err)
11204			break;
11205	}
11206	if (i < size)
11207		goto out;
11208
11209	/* Selfboot format */
11210	magic = be32_to_cpu(buf[0]);
11211	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11212	    TG3_EEPROM_MAGIC_FW) {
11213		u8 *buf8 = (u8 *) buf, csum8 = 0;
11214
11215		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11216		    TG3_EEPROM_SB_REVISION_2) {
11217			/* For rev 2, the csum doesn't include the MBA. */
11218			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11219				csum8 += buf8[i];
11220			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11221				csum8 += buf8[i];
11222		} else {
11223			for (i = 0; i < size; i++)
11224				csum8 += buf8[i];
11225		}
11226
11227		if (csum8 == 0) {
11228			err = 0;
11229			goto out;
11230		}
11231
11232		err = -EIO;
11233		goto out;
11234	}
11235
11236	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11237	    TG3_EEPROM_MAGIC_HW) {
11238		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11239		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11240		u8 *buf8 = (u8 *) buf;
11241
11242		/* Separate the parity bits and the data bytes.  */
11243		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11244			if ((i == 0) || (i == 8)) {
11245				int l;
11246				u8 msk;
11247
11248				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11249					parity[k++] = buf8[i] & msk;
11250				i++;
11251			} else if (i == 16) {
11252				int l;
11253				u8 msk;
11254
11255				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11256					parity[k++] = buf8[i] & msk;
11257				i++;
11258
11259				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11260					parity[k++] = buf8[i] & msk;
11261				i++;
11262			}
11263			data[j++] = buf8[i];
11264		}
11265
11266		err = -EIO;
11267		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11268			u8 hw8 = hweight8(data[i]);
11269
11270			if ((hw8 & 0x1) && parity[i])
11271				goto out;
11272			else if (!(hw8 & 0x1) && !parity[i])
11273				goto out;
11274		}
11275		err = 0;
11276		goto out;
11277	}
11278
11279	err = -EIO;
11280
11281	/* Bootstrap checksum at offset 0x10 */
11282	csum = calc_crc((unsigned char *) buf, 0x10);
11283	if (csum != le32_to_cpu(buf[0x10/4]))
11284		goto out;
11285
11286	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11287	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11288	if (csum != le32_to_cpu(buf[0xfc/4]))
11289		goto out;
11290
11291	kfree(buf);
11292
11293	buf = tg3_vpd_readblock(tp, &len);
11294	if (!buf)
11295		return -ENOMEM;
11296
11297	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11298	if (i > 0) {
11299		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11300		if (j < 0)
11301			goto out;
11302
11303		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11304			goto out;
11305
11306		i += PCI_VPD_LRDT_TAG_SIZE;
11307		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11308					      PCI_VPD_RO_KEYWORD_CHKSUM);
11309		if (j > 0) {
11310			u8 csum8 = 0;
11311
11312			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11313
11314			for (i = 0; i <= j; i++)
11315				csum8 += ((u8 *)buf)[i];
11316
11317			if (csum8)
11318				goto out;
11319		}
11320	}
11321
11322	err = 0;
11323
11324out:
11325	kfree(buf);
11326	return err;
11327}
11328
11329#define TG3_SERDES_TIMEOUT_SEC	2
11330#define TG3_COPPER_TIMEOUT_SEC	6
11331
11332static int tg3_test_link(struct tg3 *tp)
11333{
11334	int i, max;
11335
11336	if (!netif_running(tp->dev))
11337		return -ENODEV;
11338
11339	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11340		max = TG3_SERDES_TIMEOUT_SEC;
11341	else
11342		max = TG3_COPPER_TIMEOUT_SEC;
11343
11344	for (i = 0; i < max; i++) {
11345		if (netif_carrier_ok(tp->dev))
11346			return 0;
11347
11348		if (msleep_interruptible(1000))
11349			break;
11350	}
11351
11352	return -EIO;
11353}
11354
11355/* Only test the commonly used registers */
11356static int tg3_test_registers(struct tg3 *tp)
11357{
11358	int i, is_5705, is_5750;
11359	u32 offset, read_mask, write_mask, val, save_val, read_val;
11360	static struct {
11361		u16 offset;
11362		u16 flags;
11363#define TG3_FL_5705	0x1
11364#define TG3_FL_NOT_5705	0x2
11365#define TG3_FL_NOT_5788	0x4
11366#define TG3_FL_NOT_5750	0x8
11367		u32 read_mask;
11368		u32 write_mask;
11369	} reg_tbl[] = {
11370		/* MAC Control Registers */
11371		{ MAC_MODE, TG3_FL_NOT_5705,
11372			0x00000000, 0x00ef6f8c },
11373		{ MAC_MODE, TG3_FL_5705,
11374			0x00000000, 0x01ef6b8c },
11375		{ MAC_STATUS, TG3_FL_NOT_5705,
11376			0x03800107, 0x00000000 },
11377		{ MAC_STATUS, TG3_FL_5705,
11378			0x03800100, 0x00000000 },
11379		{ MAC_ADDR_0_HIGH, 0x0000,
11380			0x00000000, 0x0000ffff },
11381		{ MAC_ADDR_0_LOW, 0x0000,
11382			0x00000000, 0xffffffff },
11383		{ MAC_RX_MTU_SIZE, 0x0000,
11384			0x00000000, 0x0000ffff },
11385		{ MAC_TX_MODE, 0x0000,
11386			0x00000000, 0x00000070 },
11387		{ MAC_TX_LENGTHS, 0x0000,
11388			0x00000000, 0x00003fff },
11389		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11390			0x00000000, 0x000007fc },
11391		{ MAC_RX_MODE, TG3_FL_5705,
11392			0x00000000, 0x000007dc },
11393		{ MAC_HASH_REG_0, 0x0000,
11394			0x00000000, 0xffffffff },
11395		{ MAC_HASH_REG_1, 0x0000,
11396			0x00000000, 0xffffffff },
11397		{ MAC_HASH_REG_2, 0x0000,
11398			0x00000000, 0xffffffff },
11399		{ MAC_HASH_REG_3, 0x0000,
11400			0x00000000, 0xffffffff },
11401
11402		/* Receive Data and Receive BD Initiator Control Registers. */
11403		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11404			0x00000000, 0xffffffff },
11405		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11406			0x00000000, 0xffffffff },
11407		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11408			0x00000000, 0x00000003 },
11409		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11410			0x00000000, 0xffffffff },
11411		{ RCVDBDI_STD_BD+0, 0x0000,
11412			0x00000000, 0xffffffff },
11413		{ RCVDBDI_STD_BD+4, 0x0000,
11414			0x00000000, 0xffffffff },
11415		{ RCVDBDI_STD_BD+8, 0x0000,
11416			0x00000000, 0xffff0002 },
11417		{ RCVDBDI_STD_BD+0xc, 0x0000,
11418			0x00000000, 0xffffffff },
11419
11420		/* Receive BD Initiator Control Registers. */
11421		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11422			0x00000000, 0xffffffff },
11423		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11424			0x00000000, 0x000003ff },
11425		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11426			0x00000000, 0xffffffff },
11427
11428		/* Host Coalescing Control Registers. */
11429		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11430			0x00000000, 0x00000004 },
11431		{ HOSTCC_MODE, TG3_FL_5705,
11432			0x00000000, 0x000000f6 },
11433		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11434			0x00000000, 0xffffffff },
11435		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11436			0x00000000, 0x000003ff },
11437		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11438			0x00000000, 0xffffffff },
11439		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11440			0x00000000, 0x000003ff },
11441		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11442			0x00000000, 0xffffffff },
11443		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11444			0x00000000, 0x000000ff },
11445		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11446			0x00000000, 0xffffffff },
11447		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11448			0x00000000, 0x000000ff },
11449		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11450			0x00000000, 0xffffffff },
11451		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11452			0x00000000, 0xffffffff },
11453		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11454			0x00000000, 0xffffffff },
11455		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11456			0x00000000, 0x000000ff },
11457		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11458			0x00000000, 0xffffffff },
11459		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11460			0x00000000, 0x000000ff },
11461		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11462			0x00000000, 0xffffffff },
11463		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11464			0x00000000, 0xffffffff },
11465		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11466			0x00000000, 0xffffffff },
11467		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11468			0x00000000, 0xffffffff },
11469		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11470			0x00000000, 0xffffffff },
11471		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11472			0xffffffff, 0x00000000 },
11473		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11474			0xffffffff, 0x00000000 },
11475
11476		/* Buffer Manager Control Registers. */
11477		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11478			0x00000000, 0x007fff80 },
11479		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11480			0x00000000, 0x007fffff },
11481		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11482			0x00000000, 0x0000003f },
11483		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11484			0x00000000, 0x000001ff },
11485		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11486			0x00000000, 0x000001ff },
11487		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11488			0xffffffff, 0x00000000 },
11489		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11490			0xffffffff, 0x00000000 },
11491
11492		/* Mailbox Registers */
11493		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11494			0x00000000, 0x000001ff },
11495		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11496			0x00000000, 0x000001ff },
11497		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11498			0x00000000, 0x000007ff },
11499		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11500			0x00000000, 0x000001ff },
11501
11502		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11503	};
11504
11505	is_5705 = is_5750 = 0;
11506	if (tg3_flag(tp, 5705_PLUS)) {
11507		is_5705 = 1;
11508		if (tg3_flag(tp, 5750_PLUS))
11509			is_5750 = 1;
11510	}
11511
11512	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11513		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11514			continue;
11515
11516		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11517			continue;
11518
11519		if (tg3_flag(tp, IS_5788) &&
11520		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11521			continue;
11522
11523		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11524			continue;
11525
11526		offset = (u32) reg_tbl[i].offset;
11527		read_mask = reg_tbl[i].read_mask;
11528		write_mask = reg_tbl[i].write_mask;
11529
11530		/* Save the original register content */
11531		save_val = tr32(offset);
11532
11533		/* Determine the read-only value. */
11534		read_val = save_val & read_mask;
11535
11536		/* Write zero to the register, then make sure the read-only bits
11537		 * are not changed and the read/write bits are all zeros.
11538		 */
11539		tw32(offset, 0);
11540
11541		val = tr32(offset);
11542
11543		/* Test the read-only and read/write bits. */
11544		if (((val & read_mask) != read_val) || (val & write_mask))
11545			goto out;
11546
11547		/* Write ones to all the bits defined by RdMask and WrMask, then
11548		 * make sure the read-only bits are not changed and the
11549		 * read/write bits are all ones.
11550		 */
11551		tw32(offset, read_mask | write_mask);
11552
11553		val = tr32(offset);
11554
11555		/* Test the read-only bits. */
11556		if ((val & read_mask) != read_val)
11557			goto out;
11558
11559		/* Test the read/write bits. */
11560		if ((val & write_mask) != write_mask)
11561			goto out;
11562
11563		tw32(offset, save_val);
11564	}
11565
11566	return 0;
11567
11568out:
11569	if (netif_msg_hw(tp))
11570		netdev_err(tp->dev,
11571			   "Register test failed at offset %x\n", offset);
11572	tw32(offset, save_val);
11573	return -EIO;
11574}
11575
11576static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11577{
11578	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11579	int i;
11580	u32 j;
11581
11582	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11583		for (j = 0; j < len; j += 4) {
11584			u32 val;
11585
11586			tg3_write_mem(tp, offset + j, test_pattern[i]);
11587			tg3_read_mem(tp, offset + j, &val);
11588			if (val != test_pattern[i])
11589				return -EIO;
11590		}
11591	}
11592	return 0;
11593}
11594
11595static int tg3_test_memory(struct tg3 *tp)
11596{
11597	static struct mem_entry {
11598		u32 offset;
11599		u32 len;
11600	} mem_tbl_570x[] = {
11601		{ 0x00000000, 0x00b50},
11602		{ 0x00002000, 0x1c000},
11603		{ 0xffffffff, 0x00000}
11604	}, mem_tbl_5705[] = {
11605		{ 0x00000100, 0x0000c},
11606		{ 0x00000200, 0x00008},
11607		{ 0x00004000, 0x00800},
11608		{ 0x00006000, 0x01000},
11609		{ 0x00008000, 0x02000},
11610		{ 0x00010000, 0x0e000},
11611		{ 0xffffffff, 0x00000}
11612	}, mem_tbl_5755[] = {
11613		{ 0x00000200, 0x00008},
11614		{ 0x00004000, 0x00800},
11615		{ 0x00006000, 0x00800},
11616		{ 0x00008000, 0x02000},
11617		{ 0x00010000, 0x0c000},
11618		{ 0xffffffff, 0x00000}
11619	}, mem_tbl_5906[] = {
11620		{ 0x00000200, 0x00008},
11621		{ 0x00004000, 0x00400},
11622		{ 0x00006000, 0x00400},
11623		{ 0x00008000, 0x01000},
11624		{ 0x00010000, 0x01000},
11625		{ 0xffffffff, 0x00000}
11626	}, mem_tbl_5717[] = {
11627		{ 0x00000200, 0x00008},
11628		{ 0x00010000, 0x0a000},
11629		{ 0x00020000, 0x13c00},
11630		{ 0xffffffff, 0x00000}
11631	}, mem_tbl_57765[] = {
11632		{ 0x00000200, 0x00008},
11633		{ 0x00004000, 0x00800},
11634		{ 0x00006000, 0x09800},
11635		{ 0x00010000, 0x0a000},
11636		{ 0xffffffff, 0x00000}
11637	};
11638	struct mem_entry *mem_tbl;
11639	int err = 0;
11640	int i;
11641
11642	if (tg3_flag(tp, 5717_PLUS))
11643		mem_tbl = mem_tbl_5717;
11644	else if (tg3_flag(tp, 57765_CLASS))
11645		mem_tbl = mem_tbl_57765;
11646	else if (tg3_flag(tp, 5755_PLUS))
11647		mem_tbl = mem_tbl_5755;
11648	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11649		mem_tbl = mem_tbl_5906;
11650	else if (tg3_flag(tp, 5705_PLUS))
11651		mem_tbl = mem_tbl_5705;
11652	else
11653		mem_tbl = mem_tbl_570x;
11654
11655	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11656		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11657		if (err)
11658			break;
11659	}
11660
11661	return err;
11662}
11663
11664#define TG3_TSO_MSS		500
11665
11666#define TG3_TSO_IP_HDR_LEN	20
11667#define TG3_TSO_TCP_HDR_LEN	20
11668#define TG3_TSO_TCP_OPT_LEN	12
11669
11670static const u8 tg3_tso_header[] = {
116710x08, 0x00,
116720x45, 0x00, 0x00, 0x00,
116730x00, 0x00, 0x40, 0x00,
116740x40, 0x06, 0x00, 0x00,
116750x0a, 0x00, 0x00, 0x01,
116760x0a, 0x00, 0x00, 0x02,
116770x0d, 0x00, 0xe0, 0x00,
116780x00, 0x00, 0x01, 0x00,
116790x00, 0x00, 0x02, 0x00,
116800x80, 0x10, 0x10, 0x00,
116810x14, 0x09, 0x00, 0x00,
116820x01, 0x01, 0x08, 0x0a,
116830x11, 0x11, 0x11, 0x11,
116840x11, 0x11, 0x11, 0x11,
11685};
11686
11687static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11688{
11689	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11690	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11691	u32 budget;
11692	struct sk_buff *skb;
11693	u8 *tx_data, *rx_data;
11694	dma_addr_t map;
11695	int num_pkts, tx_len, rx_len, i, err;
11696	struct tg3_rx_buffer_desc *desc;
11697	struct tg3_napi *tnapi, *rnapi;
11698	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11699
11700	tnapi = &tp->napi[0];
11701	rnapi = &tp->napi[0];
11702	if (tp->irq_cnt > 1) {
11703		if (tg3_flag(tp, ENABLE_RSS))
11704			rnapi = &tp->napi[1];
11705		if (tg3_flag(tp, ENABLE_TSS))
11706			tnapi = &tp->napi[1];
11707	}
11708	coal_now = tnapi->coal_now | rnapi->coal_now;
11709
11710	err = -EIO;
11711
11712	tx_len = pktsz;
11713	skb = netdev_alloc_skb(tp->dev, tx_len);
11714	if (!skb)
11715		return -ENOMEM;
11716
11717	tx_data = skb_put(skb, tx_len);
11718	memcpy(tx_data, tp->dev->dev_addr, 6);
11719	memset(tx_data + 6, 0x0, 8);
11720
11721	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11722
11723	if (tso_loopback) {
11724		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11725
11726		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11727			      TG3_TSO_TCP_OPT_LEN;
11728
11729		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11730		       sizeof(tg3_tso_header));
11731		mss = TG3_TSO_MSS;
11732
11733		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11734		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11735
11736		/* Set the total length field in the IP header */
11737		iph->tot_len = htons((u16)(mss + hdr_len));
11738
11739		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11740			      TXD_FLAG_CPU_POST_DMA);
11741
11742		if (tg3_flag(tp, HW_TSO_1) ||
11743		    tg3_flag(tp, HW_TSO_2) ||
11744		    tg3_flag(tp, HW_TSO_3)) {
11745			struct tcphdr *th;
11746			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11747			th = (struct tcphdr *)&tx_data[val];
11748			th->check = 0;
11749		} else
11750			base_flags |= TXD_FLAG_TCPUDP_CSUM;
11751
11752		if (tg3_flag(tp, HW_TSO_3)) {
11753			mss |= (hdr_len & 0xc) << 12;
11754			if (hdr_len & 0x10)
11755				base_flags |= 0x00000010;
11756			base_flags |= (hdr_len & 0x3e0) << 5;
11757		} else if (tg3_flag(tp, HW_TSO_2))
11758			mss |= hdr_len << 9;
11759		else if (tg3_flag(tp, HW_TSO_1) ||
11760			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11761			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11762		} else {
11763			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11764		}
11765
11766		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11767	} else {
11768		num_pkts = 1;
11769		data_off = ETH_HLEN;
11770
11771		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11772		    tx_len > VLAN_ETH_FRAME_LEN)
11773			base_flags |= TXD_FLAG_JMB_PKT;
11774	}
11775
11776	for (i = data_off; i < tx_len; i++)
11777		tx_data[i] = (u8) (i & 0xff);
11778
11779	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11780	if (pci_dma_mapping_error(tp->pdev, map)) {
11781		dev_kfree_skb(skb);
11782		return -EIO;
11783	}
11784
11785	val = tnapi->tx_prod;
11786	tnapi->tx_buffers[val].skb = skb;
11787	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11788
11789	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11790	       rnapi->coal_now);
11791
11792	udelay(10);
11793
11794	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11795
11796	budget = tg3_tx_avail(tnapi);
11797	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11798			    base_flags | TXD_FLAG_END, mss, 0)) {
11799		tnapi->tx_buffers[val].skb = NULL;
11800		dev_kfree_skb(skb);
11801		return -EIO;
11802	}
11803
11804	tnapi->tx_prod++;
11805
11806	/* Sync BD data before updating mailbox */
11807	wmb();
11808
11809	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11810	tr32_mailbox(tnapi->prodmbox);
11811
11812	udelay(10);
11813
11814	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11815	for (i = 0; i < 35; i++) {
11816		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11817		       coal_now);
11818
11819		udelay(10);
11820
11821		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11822		rx_idx = rnapi->hw_status->idx[0].rx_producer;
11823		if ((tx_idx == tnapi->tx_prod) &&
11824		    (rx_idx == (rx_start_idx + num_pkts)))
11825			break;
11826	}
11827
11828	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11829	dev_kfree_skb(skb);
11830
11831	if (tx_idx != tnapi->tx_prod)
11832		goto out;
11833
11834	if (rx_idx != rx_start_idx + num_pkts)
11835		goto out;
11836
11837	val = data_off;
11838	while (rx_idx != rx_start_idx) {
11839		desc = &rnapi->rx_rcb[rx_start_idx++];
11840		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11841		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11842
11843		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11844		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11845			goto out;
11846
11847		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11848			 - ETH_FCS_LEN;
11849
11850		if (!tso_loopback) {
11851			if (rx_len != tx_len)
11852				goto out;
11853
11854			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11855				if (opaque_key != RXD_OPAQUE_RING_STD)
11856					goto out;
11857			} else {
11858				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11859					goto out;
11860			}
11861		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11862			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11863			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
11864			goto out;
11865		}
11866
11867		if (opaque_key == RXD_OPAQUE_RING_STD) {
11868			rx_data = tpr->rx_std_buffers[desc_idx].data;
11869			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11870					     mapping);
11871		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11872			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11873			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11874					     mapping);
11875		} else
11876			goto out;
11877
11878		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11879					    PCI_DMA_FROMDEVICE);
11880
11881		rx_data += TG3_RX_OFFSET(tp);
11882		for (i = data_off; i < rx_len; i++, val++) {
11883			if (*(rx_data + i) != (u8) (val & 0xff))
11884				goto out;
11885		}
11886	}
11887
11888	err = 0;
11889
11890	/* tg3_free_rings will unmap and free the rx_data */
11891out:
11892	return err;
11893}
11894
11895#define TG3_STD_LOOPBACK_FAILED		1
11896#define TG3_JMB_LOOPBACK_FAILED		2
11897#define TG3_TSO_LOOPBACK_FAILED		4
11898#define TG3_LOOPBACK_FAILED \
11899	(TG3_STD_LOOPBACK_FAILED | \
11900	 TG3_JMB_LOOPBACK_FAILED | \
11901	 TG3_TSO_LOOPBACK_FAILED)
11902
11903static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11904{
11905	int err = -EIO;
11906	u32 eee_cap;
11907	u32 jmb_pkt_sz = 9000;
11908
11909	if (tp->dma_limit)
11910		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11911
11912	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11913	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11914
11915	if (!netif_running(tp->dev)) {
11916		data[0] = TG3_LOOPBACK_FAILED;
11917		data[1] = TG3_LOOPBACK_FAILED;
11918		if (do_extlpbk)
11919			data[2] = TG3_LOOPBACK_FAILED;
11920		goto done;
11921	}
11922
11923	err = tg3_reset_hw(tp, 1);
11924	if (err) {
11925		data[0] = TG3_LOOPBACK_FAILED;
11926		data[1] = TG3_LOOPBACK_FAILED;
11927		if (do_extlpbk)
11928			data[2] = TG3_LOOPBACK_FAILED;
11929		goto done;
11930	}
11931
11932	if (tg3_flag(tp, ENABLE_RSS)) {
11933		int i;
11934
11935		/* Reroute all rx packets to the 1st queue */
11936		for (i = MAC_RSS_INDIR_TBL_0;
11937		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11938			tw32(i, 0x0);
11939	}
11940
11941	/* HW errata - mac loopback fails in some cases on 5780.
11942	 * Normal traffic and PHY loopback are not affected by
11943	 * errata.  Also, the MAC loopback test is deprecated for
11944	 * all newer ASIC revisions.
11945	 */
11946	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11947	    !tg3_flag(tp, CPMU_PRESENT)) {
11948		tg3_mac_loopback(tp, true);
11949
11950		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11951			data[0] |= TG3_STD_LOOPBACK_FAILED;
11952
11953		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11954		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11955			data[0] |= TG3_JMB_LOOPBACK_FAILED;
11956
11957		tg3_mac_loopback(tp, false);
11958	}
11959
11960	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11961	    !tg3_flag(tp, USE_PHYLIB)) {
11962		int i;
11963
11964		tg3_phy_lpbk_set(tp, 0, false);
11965
11966		/* Wait for link */
11967		for (i = 0; i < 100; i++) {
11968			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11969				break;
11970			mdelay(1);
11971		}
11972
11973		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11974			data[1] |= TG3_STD_LOOPBACK_FAILED;
11975		if (tg3_flag(tp, TSO_CAPABLE) &&
11976		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11977			data[1] |= TG3_TSO_LOOPBACK_FAILED;
11978		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11979		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11980			data[1] |= TG3_JMB_LOOPBACK_FAILED;
11981
11982		if (do_extlpbk) {
11983			tg3_phy_lpbk_set(tp, 0, true);
11984
11985			/* All link indications report up, but the hardware
11986			 * isn't really ready for about 20 msec.  Double it
11987			 * to be sure.
11988			 */
11989			mdelay(40);
11990
11991			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11992				data[2] |= TG3_STD_LOOPBACK_FAILED;
11993			if (tg3_flag(tp, TSO_CAPABLE) &&
11994			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11995				data[2] |= TG3_TSO_LOOPBACK_FAILED;
11996			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11997			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11998				data[2] |= TG3_JMB_LOOPBACK_FAILED;
11999		}
12000
12001		/* Re-enable gphy autopowerdown. */
12002		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12003			tg3_phy_toggle_apd(tp, true);
12004	}
12005
12006	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12007
12008done:
12009	tp->phy_flags |= eee_cap;
12010
12011	return err;
12012}
12013
12014static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12015			  u64 *data)
12016{
12017	struct tg3 *tp = netdev_priv(dev);
12018	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12019
12020	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12021	    tg3_power_up(tp)) {
12022		etest->flags |= ETH_TEST_FL_FAILED;
12023		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12024		return;
12025	}
12026
12027	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12028
12029	if (tg3_test_nvram(tp) != 0) {
12030		etest->flags |= ETH_TEST_FL_FAILED;
12031		data[0] = 1;
12032	}
12033	if (!doextlpbk && tg3_test_link(tp)) {
12034		etest->flags |= ETH_TEST_FL_FAILED;
12035		data[1] = 1;
12036	}
12037	if (etest->flags & ETH_TEST_FL_OFFLINE) {
12038		int err, err2 = 0, irq_sync = 0;
12039
12040		if (netif_running(dev)) {
12041			tg3_phy_stop(tp);
12042			tg3_netif_stop(tp);
12043			irq_sync = 1;
12044		}
12045
12046		tg3_full_lock(tp, irq_sync);
12047
12048		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12049		err = tg3_nvram_lock(tp);
12050		tg3_halt_cpu(tp, RX_CPU_BASE);
12051		if (!tg3_flag(tp, 5705_PLUS))
12052			tg3_halt_cpu(tp, TX_CPU_BASE);
12053		if (!err)
12054			tg3_nvram_unlock(tp);
12055
12056		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12057			tg3_phy_reset(tp);
12058
12059		if (tg3_test_registers(tp) != 0) {
12060			etest->flags |= ETH_TEST_FL_FAILED;
12061			data[2] = 1;
12062		}
12063
12064		if (tg3_test_memory(tp) != 0) {
12065			etest->flags |= ETH_TEST_FL_FAILED;
12066			data[3] = 1;
12067		}
12068
12069		if (doextlpbk)
12070			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12071
12072		if (tg3_test_loopback(tp, &data[4], doextlpbk))
12073			etest->flags |= ETH_TEST_FL_FAILED;
12074
12075		tg3_full_unlock(tp);
12076
12077		if (tg3_test_interrupt(tp) != 0) {
12078			etest->flags |= ETH_TEST_FL_FAILED;
12079			data[7] = 1;
12080		}
12081
12082		tg3_full_lock(tp, 0);
12083
12084		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12085		if (netif_running(dev)) {
12086			tg3_flag_set(tp, INIT_COMPLETE);
12087			err2 = tg3_restart_hw(tp, 1);
12088			if (!err2)
12089				tg3_netif_start(tp);
12090		}
12091
12092		tg3_full_unlock(tp);
12093
12094		if (irq_sync && !err2)
12095			tg3_phy_start(tp);
12096	}
12097	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12098		tg3_power_down(tp);
12099
12100}
12101
12102static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12103{
12104	struct mii_ioctl_data *data = if_mii(ifr);
12105	struct tg3 *tp = netdev_priv(dev);
12106	int err;
12107
12108	if (tg3_flag(tp, USE_PHYLIB)) {
12109		struct phy_device *phydev;
12110		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12111			return -EAGAIN;
12112		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12113		return phy_mii_ioctl(phydev, ifr, cmd);
12114	}
12115
12116	switch (cmd) {
12117	case SIOCGMIIPHY:
12118		data->phy_id = tp->phy_addr;
12119
12120		/* fallthru */
12121	case SIOCGMIIREG: {
12122		u32 mii_regval;
12123
12124		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12125			break;			/* We have no PHY */
12126
12127		if (!netif_running(dev))
12128			return -EAGAIN;
12129
12130		spin_lock_bh(&tp->lock);
12131		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12132		spin_unlock_bh(&tp->lock);
12133
12134		data->val_out = mii_regval;
12135
12136		return err;
12137	}
12138
12139	case SIOCSMIIREG:
12140		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12141			break;			/* We have no PHY */
12142
12143		if (!netif_running(dev))
12144			return -EAGAIN;
12145
12146		spin_lock_bh(&tp->lock);
12147		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12148		spin_unlock_bh(&tp->lock);
12149
12150		return err;
12151
12152	default:
12153		/* do nothing */
12154		break;
12155	}
12156	return -EOPNOTSUPP;
12157}
12158
12159static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12160{
12161	struct tg3 *tp = netdev_priv(dev);
12162
12163	memcpy(ec, &tp->coal, sizeof(*ec));
12164	return 0;
12165}
12166
12167static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12168{
12169	struct tg3 *tp = netdev_priv(dev);
12170	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12171	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12172
12173	if (!tg3_flag(tp, 5705_PLUS)) {
12174		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12175		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12176		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12177		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12178	}
12179
12180	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12181	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12182	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12183	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12184	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12185	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12186	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12187	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12188	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12189	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12190		return -EINVAL;
12191
12192	/* No rx interrupts will be generated if both are zero */
12193	if ((ec->rx_coalesce_usecs == 0) &&
12194	    (ec->rx_max_coalesced_frames == 0))
12195		return -EINVAL;
12196
12197	/* No tx interrupts will be generated if both are zero */
12198	if ((ec->tx_coalesce_usecs == 0) &&
12199	    (ec->tx_max_coalesced_frames == 0))
12200		return -EINVAL;
12201
12202	/* Only copy relevant parameters, ignore all others. */
12203	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12204	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12205	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12206	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12207	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12208	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12209	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12210	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12211	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12212
12213	if (netif_running(dev)) {
12214		tg3_full_lock(tp, 0);
12215		__tg3_set_coalesce(tp, &tp->coal);
12216		tg3_full_unlock(tp);
12217	}
12218	return 0;
12219}
12220
12221static const struct ethtool_ops tg3_ethtool_ops = {
12222	.get_settings		= tg3_get_settings,
12223	.set_settings		= tg3_set_settings,
12224	.get_drvinfo		= tg3_get_drvinfo,
12225	.get_regs_len		= tg3_get_regs_len,
12226	.get_regs		= tg3_get_regs,
12227	.get_wol		= tg3_get_wol,
12228	.set_wol		= tg3_set_wol,
12229	.get_msglevel		= tg3_get_msglevel,
12230	.set_msglevel		= tg3_set_msglevel,
12231	.nway_reset		= tg3_nway_reset,
12232	.get_link		= ethtool_op_get_link,
12233	.get_eeprom_len		= tg3_get_eeprom_len,
12234	.get_eeprom		= tg3_get_eeprom,
12235	.set_eeprom		= tg3_set_eeprom,
12236	.get_ringparam		= tg3_get_ringparam,
12237	.set_ringparam		= tg3_set_ringparam,
12238	.get_pauseparam		= tg3_get_pauseparam,
12239	.set_pauseparam		= tg3_set_pauseparam,
12240	.self_test		= tg3_self_test,
12241	.get_strings		= tg3_get_strings,
12242	.set_phys_id		= tg3_set_phys_id,
12243	.get_ethtool_stats	= tg3_get_ethtool_stats,
12244	.get_coalesce		= tg3_get_coalesce,
12245	.set_coalesce		= tg3_set_coalesce,
12246	.get_sset_count		= tg3_get_sset_count,
12247	.get_rxnfc		= tg3_get_rxnfc,
12248	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12249	.get_rxfh_indir		= tg3_get_rxfh_indir,
12250	.set_rxfh_indir		= tg3_set_rxfh_indir,
12251};
12252
12253static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12254						struct rtnl_link_stats64 *stats)
12255{
12256	struct tg3 *tp = netdev_priv(dev);
12257
12258	if (!tp->hw_stats)
12259		return &tp->net_stats_prev;
12260
12261	spin_lock_bh(&tp->lock);
12262	tg3_get_nstats(tp, stats);
12263	spin_unlock_bh(&tp->lock);
12264
12265	return stats;
12266}
12267
12268static void tg3_set_rx_mode(struct net_device *dev)
12269{
12270	struct tg3 *tp = netdev_priv(dev);
12271
12272	if (!netif_running(dev))
12273		return;
12274
12275	tg3_full_lock(tp, 0);
12276	__tg3_set_rx_mode(dev);
12277	tg3_full_unlock(tp);
12278}
12279
12280static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12281			       int new_mtu)
12282{
12283	dev->mtu = new_mtu;
12284
12285	if (new_mtu > ETH_DATA_LEN) {
12286		if (tg3_flag(tp, 5780_CLASS)) {
12287			netdev_update_features(dev);
12288			tg3_flag_clear(tp, TSO_CAPABLE);
12289		} else {
12290			tg3_flag_set(tp, JUMBO_RING_ENABLE);
12291		}
12292	} else {
12293		if (tg3_flag(tp, 5780_CLASS)) {
12294			tg3_flag_set(tp, TSO_CAPABLE);
12295			netdev_update_features(dev);
12296		}
12297		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12298	}
12299}
12300
12301static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12302{
12303	struct tg3 *tp = netdev_priv(dev);
12304	int err, reset_phy = 0;
12305
12306	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12307		return -EINVAL;
12308
12309	if (!netif_running(dev)) {
12310		/* We'll just catch it later when the
12311		 * device is up'd.
12312		 */
12313		tg3_set_mtu(dev, tp, new_mtu);
12314		return 0;
12315	}
12316
12317	tg3_phy_stop(tp);
12318
12319	tg3_netif_stop(tp);
12320
12321	tg3_full_lock(tp, 1);
12322
12323	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12324
12325	tg3_set_mtu(dev, tp, new_mtu);
12326
12327	/* Reset PHY, otherwise the read DMA engine will be in a mode that
12328	 * breaks all requests to 256 bytes.
12329	 */
12330	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12331		reset_phy = 1;
12332
12333	err = tg3_restart_hw(tp, reset_phy);
12334
12335	if (!err)
12336		tg3_netif_start(tp);
12337
12338	tg3_full_unlock(tp);
12339
12340	if (!err)
12341		tg3_phy_start(tp);
12342
12343	return err;
12344}
12345
12346static const struct net_device_ops tg3_netdev_ops = {
12347	.ndo_open		= tg3_open,
12348	.ndo_stop		= tg3_close,
12349	.ndo_start_xmit		= tg3_start_xmit,
12350	.ndo_get_stats64	= tg3_get_stats64,
12351	.ndo_validate_addr	= eth_validate_addr,
12352	.ndo_set_rx_mode	= tg3_set_rx_mode,
12353	.ndo_set_mac_address	= tg3_set_mac_addr,
12354	.ndo_do_ioctl		= tg3_ioctl,
12355	.ndo_tx_timeout		= tg3_tx_timeout,
12356	.ndo_change_mtu		= tg3_change_mtu,
12357	.ndo_fix_features	= tg3_fix_features,
12358	.ndo_set_features	= tg3_set_features,
12359#ifdef CONFIG_NET_POLL_CONTROLLER
12360	.ndo_poll_controller	= tg3_poll_controller,
12361#endif
12362};
12363
12364static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12365{
12366	u32 cursize, val, magic;
12367
12368	tp->nvram_size = EEPROM_CHIP_SIZE;
12369
12370	if (tg3_nvram_read(tp, 0, &magic) != 0)
12371		return;
12372
12373	if ((magic != TG3_EEPROM_MAGIC) &&
12374	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12375	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12376		return;
12377
12378	/*
12379	 * Size the chip by reading offsets at increasing powers of two.
12380	 * When we encounter our validation signature, we know the addressing
12381	 * has wrapped around, and thus have our chip size.
12382	 */
12383	cursize = 0x10;
12384
12385	while (cursize < tp->nvram_size) {
12386		if (tg3_nvram_read(tp, cursize, &val) != 0)
12387			return;
12388
12389		if (val == magic)
12390			break;
12391
12392		cursize <<= 1;
12393	}
12394
12395	tp->nvram_size = cursize;
12396}
12397
12398static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12399{
12400	u32 val;
12401
12402	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12403		return;
12404
12405	/* Selfboot format */
12406	if (val != TG3_EEPROM_MAGIC) {
12407		tg3_get_eeprom_size(tp);
12408		return;
12409	}
12410
12411	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12412		if (val != 0) {
12413			/* This is confusing.  We want to operate on the
12414			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12415			 * call will read from NVRAM and byteswap the data
12416			 * according to the byteswapping settings for all
12417			 * other register accesses.  This ensures the data we
12418			 * want will always reside in the lower 16-bits.
12419			 * However, the data in NVRAM is in LE format, which
12420			 * means the data from the NVRAM read will always be
12421			 * opposite the endianness of the CPU.  The 16-bit
12422			 * byteswap then brings the data to CPU endianness.
12423			 */
12424			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12425			return;
12426		}
12427	}
12428	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12429}
12430
12431static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12432{
12433	u32 nvcfg1;
12434
12435	nvcfg1 = tr32(NVRAM_CFG1);
12436	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12437		tg3_flag_set(tp, FLASH);
12438	} else {
12439		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12440		tw32(NVRAM_CFG1, nvcfg1);
12441	}
12442
12443	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12444	    tg3_flag(tp, 5780_CLASS)) {
12445		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12446		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12447			tp->nvram_jedecnum = JEDEC_ATMEL;
12448			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12449			tg3_flag_set(tp, NVRAM_BUFFERED);
12450			break;
12451		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12452			tp->nvram_jedecnum = JEDEC_ATMEL;
12453			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12454			break;
12455		case FLASH_VENDOR_ATMEL_EEPROM:
12456			tp->nvram_jedecnum = JEDEC_ATMEL;
12457			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12458			tg3_flag_set(tp, NVRAM_BUFFERED);
12459			break;
12460		case FLASH_VENDOR_ST:
12461			tp->nvram_jedecnum = JEDEC_ST;
12462			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12463			tg3_flag_set(tp, NVRAM_BUFFERED);
12464			break;
12465		case FLASH_VENDOR_SAIFUN:
12466			tp->nvram_jedecnum = JEDEC_SAIFUN;
12467			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12468			break;
12469		case FLASH_VENDOR_SST_SMALL:
12470		case FLASH_VENDOR_SST_LARGE:
12471			tp->nvram_jedecnum = JEDEC_SST;
12472			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12473			break;
12474		}
12475	} else {
12476		tp->nvram_jedecnum = JEDEC_ATMEL;
12477		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12478		tg3_flag_set(tp, NVRAM_BUFFERED);
12479	}
12480}
12481
12482static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12483{
12484	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12485	case FLASH_5752PAGE_SIZE_256:
12486		tp->nvram_pagesize = 256;
12487		break;
12488	case FLASH_5752PAGE_SIZE_512:
12489		tp->nvram_pagesize = 512;
12490		break;
12491	case FLASH_5752PAGE_SIZE_1K:
12492		tp->nvram_pagesize = 1024;
12493		break;
12494	case FLASH_5752PAGE_SIZE_2K:
12495		tp->nvram_pagesize = 2048;
12496		break;
12497	case FLASH_5752PAGE_SIZE_4K:
12498		tp->nvram_pagesize = 4096;
12499		break;
12500	case FLASH_5752PAGE_SIZE_264:
12501		tp->nvram_pagesize = 264;
12502		break;
12503	case FLASH_5752PAGE_SIZE_528:
12504		tp->nvram_pagesize = 528;
12505		break;
12506	}
12507}
12508
12509static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12510{
12511	u32 nvcfg1;
12512
12513	nvcfg1 = tr32(NVRAM_CFG1);
12514
12515	/* NVRAM protection for TPM */
12516	if (nvcfg1 & (1 << 27))
12517		tg3_flag_set(tp, PROTECTED_NVRAM);
12518
12519	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12520	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12521	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12522		tp->nvram_jedecnum = JEDEC_ATMEL;
12523		tg3_flag_set(tp, NVRAM_BUFFERED);
12524		break;
12525	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12526		tp->nvram_jedecnum = JEDEC_ATMEL;
12527		tg3_flag_set(tp, NVRAM_BUFFERED);
12528		tg3_flag_set(tp, FLASH);
12529		break;
12530	case FLASH_5752VENDOR_ST_M45PE10:
12531	case FLASH_5752VENDOR_ST_M45PE20:
12532	case FLASH_5752VENDOR_ST_M45PE40:
12533		tp->nvram_jedecnum = JEDEC_ST;
12534		tg3_flag_set(tp, NVRAM_BUFFERED);
12535		tg3_flag_set(tp, FLASH);
12536		break;
12537	}
12538
12539	if (tg3_flag(tp, FLASH)) {
12540		tg3_nvram_get_pagesize(tp, nvcfg1);
12541	} else {
12542		/* For eeprom, set pagesize to maximum eeprom size */
12543		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12544
12545		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12546		tw32(NVRAM_CFG1, nvcfg1);
12547	}
12548}
12549
12550static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12551{
12552	u32 nvcfg1, protect = 0;
12553
12554	nvcfg1 = tr32(NVRAM_CFG1);
12555
12556	/* NVRAM protection for TPM */
12557	if (nvcfg1 & (1 << 27)) {
12558		tg3_flag_set(tp, PROTECTED_NVRAM);
12559		protect = 1;
12560	}
12561
12562	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12563	switch (nvcfg1) {
12564	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12565	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12566	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12567	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12568		tp->nvram_jedecnum = JEDEC_ATMEL;
12569		tg3_flag_set(tp, NVRAM_BUFFERED);
12570		tg3_flag_set(tp, FLASH);
12571		tp->nvram_pagesize = 264;
12572		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12573		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12574			tp->nvram_size = (protect ? 0x3e200 :
12575					  TG3_NVRAM_SIZE_512KB);
12576		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12577			tp->nvram_size = (protect ? 0x1f200 :
12578					  TG3_NVRAM_SIZE_256KB);
12579		else
12580			tp->nvram_size = (protect ? 0x1f200 :
12581					  TG3_NVRAM_SIZE_128KB);
12582		break;
12583	case FLASH_5752VENDOR_ST_M45PE10:
12584	case FLASH_5752VENDOR_ST_M45PE20:
12585	case FLASH_5752VENDOR_ST_M45PE40:
12586		tp->nvram_jedecnum = JEDEC_ST;
12587		tg3_flag_set(tp, NVRAM_BUFFERED);
12588		tg3_flag_set(tp, FLASH);
12589		tp->nvram_pagesize = 256;
12590		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12591			tp->nvram_size = (protect ?
12592					  TG3_NVRAM_SIZE_64KB :
12593					  TG3_NVRAM_SIZE_128KB);
12594		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12595			tp->nvram_size = (protect ?
12596					  TG3_NVRAM_SIZE_64KB :
12597					  TG3_NVRAM_SIZE_256KB);
12598		else
12599			tp->nvram_size = (protect ?
12600					  TG3_NVRAM_SIZE_128KB :
12601					  TG3_NVRAM_SIZE_512KB);
12602		break;
12603	}
12604}
12605
12606static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12607{
12608	u32 nvcfg1;
12609
12610	nvcfg1 = tr32(NVRAM_CFG1);
12611
12612	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12613	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12614	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12615	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12616	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12617		tp->nvram_jedecnum = JEDEC_ATMEL;
12618		tg3_flag_set(tp, NVRAM_BUFFERED);
12619		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12620
12621		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12622		tw32(NVRAM_CFG1, nvcfg1);
12623		break;
12624	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12625	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12626	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12627	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12628		tp->nvram_jedecnum = JEDEC_ATMEL;
12629		tg3_flag_set(tp, NVRAM_BUFFERED);
12630		tg3_flag_set(tp, FLASH);
12631		tp->nvram_pagesize = 264;
12632		break;
12633	case FLASH_5752VENDOR_ST_M45PE10:
12634	case FLASH_5752VENDOR_ST_M45PE20:
12635	case FLASH_5752VENDOR_ST_M45PE40:
12636		tp->nvram_jedecnum = JEDEC_ST;
12637		tg3_flag_set(tp, NVRAM_BUFFERED);
12638		tg3_flag_set(tp, FLASH);
12639		tp->nvram_pagesize = 256;
12640		break;
12641	}
12642}
12643
12644static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12645{
12646	u32 nvcfg1, protect = 0;
12647
12648	nvcfg1 = tr32(NVRAM_CFG1);
12649
12650	/* NVRAM protection for TPM */
12651	if (nvcfg1 & (1 << 27)) {
12652		tg3_flag_set(tp, PROTECTED_NVRAM);
12653		protect = 1;
12654	}
12655
12656	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12657	switch (nvcfg1) {
12658	case FLASH_5761VENDOR_ATMEL_ADB021D:
12659	case FLASH_5761VENDOR_ATMEL_ADB041D:
12660	case FLASH_5761VENDOR_ATMEL_ADB081D:
12661	case FLASH_5761VENDOR_ATMEL_ADB161D:
12662	case FLASH_5761VENDOR_ATMEL_MDB021D:
12663	case FLASH_5761VENDOR_ATMEL_MDB041D:
12664	case FLASH_5761VENDOR_ATMEL_MDB081D:
12665	case FLASH_5761VENDOR_ATMEL_MDB161D:
12666		tp->nvram_jedecnum = JEDEC_ATMEL;
12667		tg3_flag_set(tp, NVRAM_BUFFERED);
12668		tg3_flag_set(tp, FLASH);
12669		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12670		tp->nvram_pagesize = 256;
12671		break;
12672	case FLASH_5761VENDOR_ST_A_M45PE20:
12673	case FLASH_5761VENDOR_ST_A_M45PE40:
12674	case FLASH_5761VENDOR_ST_A_M45PE80:
12675	case FLASH_5761VENDOR_ST_A_M45PE16:
12676	case FLASH_5761VENDOR_ST_M_M45PE20:
12677	case FLASH_5761VENDOR_ST_M_M45PE40:
12678	case FLASH_5761VENDOR_ST_M_M45PE80:
12679	case FLASH_5761VENDOR_ST_M_M45PE16:
12680		tp->nvram_jedecnum = JEDEC_ST;
12681		tg3_flag_set(tp, NVRAM_BUFFERED);
12682		tg3_flag_set(tp, FLASH);
12683		tp->nvram_pagesize = 256;
12684		break;
12685	}
12686
12687	if (protect) {
12688		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12689	} else {
12690		switch (nvcfg1) {
12691		case FLASH_5761VENDOR_ATMEL_ADB161D:
12692		case FLASH_5761VENDOR_ATMEL_MDB161D:
12693		case FLASH_5761VENDOR_ST_A_M45PE16:
12694		case FLASH_5761VENDOR_ST_M_M45PE16:
12695			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12696			break;
12697		case FLASH_5761VENDOR_ATMEL_ADB081D:
12698		case FLASH_5761VENDOR_ATMEL_MDB081D:
12699		case FLASH_5761VENDOR_ST_A_M45PE80:
12700		case FLASH_5761VENDOR_ST_M_M45PE80:
12701			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12702			break;
12703		case FLASH_5761VENDOR_ATMEL_ADB041D:
12704		case FLASH_5761VENDOR_ATMEL_MDB041D:
12705		case FLASH_5761VENDOR_ST_A_M45PE40:
12706		case FLASH_5761VENDOR_ST_M_M45PE40:
12707			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12708			break;
12709		case FLASH_5761VENDOR_ATMEL_ADB021D:
12710		case FLASH_5761VENDOR_ATMEL_MDB021D:
12711		case FLASH_5761VENDOR_ST_A_M45PE20:
12712		case FLASH_5761VENDOR_ST_M_M45PE20:
12713			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12714			break;
12715		}
12716	}
12717}
12718
12719static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12720{
12721	tp->nvram_jedecnum = JEDEC_ATMEL;
12722	tg3_flag_set(tp, NVRAM_BUFFERED);
12723	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12724}
12725
12726static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12727{
12728	u32 nvcfg1;
12729
12730	nvcfg1 = tr32(NVRAM_CFG1);
12731
12732	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12733	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12734	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12735		tp->nvram_jedecnum = JEDEC_ATMEL;
12736		tg3_flag_set(tp, NVRAM_BUFFERED);
12737		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12738
12739		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12740		tw32(NVRAM_CFG1, nvcfg1);
12741		return;
12742	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12743	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12744	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12745	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12746	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12747	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12748	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12749		tp->nvram_jedecnum = JEDEC_ATMEL;
12750		tg3_flag_set(tp, NVRAM_BUFFERED);
12751		tg3_flag_set(tp, FLASH);
12752
12753		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12754		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12755		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12756		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12757			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12758			break;
12759		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12760		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12761			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12762			break;
12763		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12764		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12765			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12766			break;
12767		}
12768		break;
12769	case FLASH_5752VENDOR_ST_M45PE10:
12770	case FLASH_5752VENDOR_ST_M45PE20:
12771	case FLASH_5752VENDOR_ST_M45PE40:
12772		tp->nvram_jedecnum = JEDEC_ST;
12773		tg3_flag_set(tp, NVRAM_BUFFERED);
12774		tg3_flag_set(tp, FLASH);
12775
12776		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12777		case FLASH_5752VENDOR_ST_M45PE10:
12778			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12779			break;
12780		case FLASH_5752VENDOR_ST_M45PE20:
12781			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12782			break;
12783		case FLASH_5752VENDOR_ST_M45PE40:
12784			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12785			break;
12786		}
12787		break;
12788	default:
12789		tg3_flag_set(tp, NO_NVRAM);
12790		return;
12791	}
12792
12793	tg3_nvram_get_pagesize(tp, nvcfg1);
12794	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12795		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12796}
12797
12798
12799static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12800{
12801	u32 nvcfg1;
12802
12803	nvcfg1 = tr32(NVRAM_CFG1);
12804
12805	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12806	case FLASH_5717VENDOR_ATMEL_EEPROM:
12807	case FLASH_5717VENDOR_MICRO_EEPROM:
12808		tp->nvram_jedecnum = JEDEC_ATMEL;
12809		tg3_flag_set(tp, NVRAM_BUFFERED);
12810		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12811
12812		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12813		tw32(NVRAM_CFG1, nvcfg1);
12814		return;
12815	case FLASH_5717VENDOR_ATMEL_MDB011D:
12816	case FLASH_5717VENDOR_ATMEL_ADB011B:
12817	case FLASH_5717VENDOR_ATMEL_ADB011D:
12818	case FLASH_5717VENDOR_ATMEL_MDB021D:
12819	case FLASH_5717VENDOR_ATMEL_ADB021B:
12820	case FLASH_5717VENDOR_ATMEL_ADB021D:
12821	case FLASH_5717VENDOR_ATMEL_45USPT:
12822		tp->nvram_jedecnum = JEDEC_ATMEL;
12823		tg3_flag_set(tp, NVRAM_BUFFERED);
12824		tg3_flag_set(tp, FLASH);
12825
12826		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12827		case FLASH_5717VENDOR_ATMEL_MDB021D:
12828			/* Detect size with tg3_nvram_get_size() */
12829			break;
12830		case FLASH_5717VENDOR_ATMEL_ADB021B:
12831		case FLASH_5717VENDOR_ATMEL_ADB021D:
12832			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12833			break;
12834		default:
12835			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12836			break;
12837		}
12838		break;
12839	case FLASH_5717VENDOR_ST_M_M25PE10:
12840	case FLASH_5717VENDOR_ST_A_M25PE10:
12841	case FLASH_5717VENDOR_ST_M_M45PE10:
12842	case FLASH_5717VENDOR_ST_A_M45PE10:
12843	case FLASH_5717VENDOR_ST_M_M25PE20:
12844	case FLASH_5717VENDOR_ST_A_M25PE20:
12845	case FLASH_5717VENDOR_ST_M_M45PE20:
12846	case FLASH_5717VENDOR_ST_A_M45PE20:
12847	case FLASH_5717VENDOR_ST_25USPT:
12848	case FLASH_5717VENDOR_ST_45USPT:
12849		tp->nvram_jedecnum = JEDEC_ST;
12850		tg3_flag_set(tp, NVRAM_BUFFERED);
12851		tg3_flag_set(tp, FLASH);
12852
12853		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12854		case FLASH_5717VENDOR_ST_M_M25PE20:
12855		case FLASH_5717VENDOR_ST_M_M45PE20:
12856			/* Detect size with tg3_nvram_get_size() */
12857			break;
12858		case FLASH_5717VENDOR_ST_A_M25PE20:
12859		case FLASH_5717VENDOR_ST_A_M45PE20:
12860			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12861			break;
12862		default:
12863			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12864			break;
12865		}
12866		break;
12867	default:
12868		tg3_flag_set(tp, NO_NVRAM);
12869		return;
12870	}
12871
12872	tg3_nvram_get_pagesize(tp, nvcfg1);
12873	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12874		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12875}
12876
12877static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12878{
12879	u32 nvcfg1, nvmpinstrp;
12880
12881	nvcfg1 = tr32(NVRAM_CFG1);
12882	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12883
12884	switch (nvmpinstrp) {
12885	case FLASH_5720_EEPROM_HD:
12886	case FLASH_5720_EEPROM_LD:
12887		tp->nvram_jedecnum = JEDEC_ATMEL;
12888		tg3_flag_set(tp, NVRAM_BUFFERED);
12889
12890		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12891		tw32(NVRAM_CFG1, nvcfg1);
12892		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12893			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12894		else
12895			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12896		return;
12897	case FLASH_5720VENDOR_M_ATMEL_DB011D:
12898	case FLASH_5720VENDOR_A_ATMEL_DB011B:
12899	case FLASH_5720VENDOR_A_ATMEL_DB011D:
12900	case FLASH_5720VENDOR_M_ATMEL_DB021D:
12901	case FLASH_5720VENDOR_A_ATMEL_DB021B:
12902	case FLASH_5720VENDOR_A_ATMEL_DB021D:
12903	case FLASH_5720VENDOR_M_ATMEL_DB041D:
12904	case FLASH_5720VENDOR_A_ATMEL_DB041B:
12905	case FLASH_5720VENDOR_A_ATMEL_DB041D:
12906	case FLASH_5720VENDOR_M_ATMEL_DB081D:
12907	case FLASH_5720VENDOR_A_ATMEL_DB081D:
12908	case FLASH_5720VENDOR_ATMEL_45USPT:
12909		tp->nvram_jedecnum = JEDEC_ATMEL;
12910		tg3_flag_set(tp, NVRAM_BUFFERED);
12911		tg3_flag_set(tp, FLASH);
12912
12913		switch (nvmpinstrp) {
12914		case FLASH_5720VENDOR_M_ATMEL_DB021D:
12915		case FLASH_5720VENDOR_A_ATMEL_DB021B:
12916		case FLASH_5720VENDOR_A_ATMEL_DB021D:
12917			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12918			break;
12919		case FLASH_5720VENDOR_M_ATMEL_DB041D:
12920		case FLASH_5720VENDOR_A_ATMEL_DB041B:
12921		case FLASH_5720VENDOR_A_ATMEL_DB041D:
12922			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12923			break;
12924		case FLASH_5720VENDOR_M_ATMEL_DB081D:
12925		case FLASH_5720VENDOR_A_ATMEL_DB081D:
12926			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12927			break;
12928		default:
12929			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12930			break;
12931		}
12932		break;
12933	case FLASH_5720VENDOR_M_ST_M25PE10:
12934	case FLASH_5720VENDOR_M_ST_M45PE10:
12935	case FLASH_5720VENDOR_A_ST_M25PE10:
12936	case FLASH_5720VENDOR_A_ST_M45PE10:
12937	case FLASH_5720VENDOR_M_ST_M25PE20:
12938	case FLASH_5720VENDOR_M_ST_M45PE20:
12939	case FLASH_5720VENDOR_A_ST_M25PE20:
12940	case FLASH_5720VENDOR_A_ST_M45PE20:
12941	case FLASH_5720VENDOR_M_ST_M25PE40:
12942	case FLASH_5720VENDOR_M_ST_M45PE40:
12943	case FLASH_5720VENDOR_A_ST_M25PE40:
12944	case FLASH_5720VENDOR_A_ST_M45PE40:
12945	case FLASH_5720VENDOR_M_ST_M25PE80:
12946	case FLASH_5720VENDOR_M_ST_M45PE80:
12947	case FLASH_5720VENDOR_A_ST_M25PE80:
12948	case FLASH_5720VENDOR_A_ST_M45PE80:
12949	case FLASH_5720VENDOR_ST_25USPT:
12950	case FLASH_5720VENDOR_ST_45USPT:
12951		tp->nvram_jedecnum = JEDEC_ST;
12952		tg3_flag_set(tp, NVRAM_BUFFERED);
12953		tg3_flag_set(tp, FLASH);
12954
12955		switch (nvmpinstrp) {
12956		case FLASH_5720VENDOR_M_ST_M25PE20:
12957		case FLASH_5720VENDOR_M_ST_M45PE20:
12958		case FLASH_5720VENDOR_A_ST_M25PE20:
12959		case FLASH_5720VENDOR_A_ST_M45PE20:
12960			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12961			break;
12962		case FLASH_5720VENDOR_M_ST_M25PE40:
12963		case FLASH_5720VENDOR_M_ST_M45PE40:
12964		case FLASH_5720VENDOR_A_ST_M25PE40:
12965		case FLASH_5720VENDOR_A_ST_M45PE40:
12966			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12967			break;
12968		case FLASH_5720VENDOR_M_ST_M25PE80:
12969		case FLASH_5720VENDOR_M_ST_M45PE80:
12970		case FLASH_5720VENDOR_A_ST_M25PE80:
12971		case FLASH_5720VENDOR_A_ST_M45PE80:
12972			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12973			break;
12974		default:
12975			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12976			break;
12977		}
12978		break;
12979	default:
12980		tg3_flag_set(tp, NO_NVRAM);
12981		return;
12982	}
12983
12984	tg3_nvram_get_pagesize(tp, nvcfg1);
12985	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12986		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12987}
12988
12989/* Chips other than 5700/5701 use the NVRAM for fetching info. */
12990static void __devinit tg3_nvram_init(struct tg3 *tp)
12991{
12992	tw32_f(GRC_EEPROM_ADDR,
12993	     (EEPROM_ADDR_FSM_RESET |
12994	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
12995	       EEPROM_ADDR_CLKPERD_SHIFT)));
12996
12997	msleep(1);
12998
12999	/* Enable seeprom accesses. */
13000	tw32_f(GRC_LOCAL_CTRL,
13001	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13002	udelay(100);
13003
13004	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13005	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13006		tg3_flag_set(tp, NVRAM);
13007
13008		if (tg3_nvram_lock(tp)) {
13009			netdev_warn(tp->dev,
13010				    "Cannot get nvram lock, %s failed\n",
13011				    __func__);
13012			return;
13013		}
13014		tg3_enable_nvram_access(tp);
13015
13016		tp->nvram_size = 0;
13017
13018		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13019			tg3_get_5752_nvram_info(tp);
13020		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13021			tg3_get_5755_nvram_info(tp);
13022		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13023			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13024			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13025			tg3_get_5787_nvram_info(tp);
13026		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13027			tg3_get_5761_nvram_info(tp);
13028		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13029			tg3_get_5906_nvram_info(tp);
13030		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13031			 tg3_flag(tp, 57765_CLASS))
13032			tg3_get_57780_nvram_info(tp);
13033		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13034			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13035			tg3_get_5717_nvram_info(tp);
13036		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13037			tg3_get_5720_nvram_info(tp);
13038		else
13039			tg3_get_nvram_info(tp);
13040
13041		if (tp->nvram_size == 0)
13042			tg3_get_nvram_size(tp);
13043
13044		tg3_disable_nvram_access(tp);
13045		tg3_nvram_unlock(tp);
13046
13047	} else {
13048		tg3_flag_clear(tp, NVRAM);
13049		tg3_flag_clear(tp, NVRAM_BUFFERED);
13050
13051		tg3_get_eeprom_size(tp);
13052	}
13053}
13054
13055struct subsys_tbl_ent {
13056	u16 subsys_vendor, subsys_devid;
13057	u32 phy_id;
13058};
13059
13060static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13061	/* Broadcom boards. */
13062	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13063	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13064	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13065	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13066	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13067	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13068	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13069	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13070	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13071	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13072	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13073	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13074	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13075	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13076	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13077	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13078	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13079	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13080	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13081	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13082	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13083	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13084
13085	/* 3com boards. */
13086	{ TG3PCI_SUBVENDOR_ID_3COM,
13087	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13088	{ TG3PCI_SUBVENDOR_ID_3COM,
13089	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13090	{ TG3PCI_SUBVENDOR_ID_3COM,
13091	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13092	{ TG3PCI_SUBVENDOR_ID_3COM,
13093	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13094	{ TG3PCI_SUBVENDOR_ID_3COM,
13095	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13096
13097	/* DELL boards. */
13098	{ TG3PCI_SUBVENDOR_ID_DELL,
13099	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13100	{ TG3PCI_SUBVENDOR_ID_DELL,
13101	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13102	{ TG3PCI_SUBVENDOR_ID_DELL,
13103	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13104	{ TG3PCI_SUBVENDOR_ID_DELL,
13105	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13106
13107	/* Compaq boards. */
13108	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13109	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13110	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13111	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13112	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13113	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13114	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13115	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13116	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13117	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13118
13119	/* IBM boards. */
13120	{ TG3PCI_SUBVENDOR_ID_IBM,
13121	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13122};
13123
13124static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13125{
13126	int i;
13127
13128	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13129		if ((subsys_id_to_phy_id[i].subsys_vendor ==
13130		     tp->pdev->subsystem_vendor) &&
13131		    (subsys_id_to_phy_id[i].subsys_devid ==
13132		     tp->pdev->subsystem_device))
13133			return &subsys_id_to_phy_id[i];
13134	}
13135	return NULL;
13136}
13137
13138static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13139{
13140	u32 val;
13141
13142	tp->phy_id = TG3_PHY_ID_INVALID;
13143	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13144
13145	/* Assume an onboard device and WOL capable by default.  */
13146	tg3_flag_set(tp, EEPROM_WRITE_PROT);
13147	tg3_flag_set(tp, WOL_CAP);
13148
13149	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13150		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13151			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13152			tg3_flag_set(tp, IS_NIC);
13153		}
13154		val = tr32(VCPU_CFGSHDW);
13155		if (val & VCPU_CFGSHDW_ASPM_DBNC)
13156			tg3_flag_set(tp, ASPM_WORKAROUND);
13157		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13158		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13159			tg3_flag_set(tp, WOL_ENABLE);
13160			device_set_wakeup_enable(&tp->pdev->dev, true);
13161		}
13162		goto done;
13163	}
13164
13165	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13166	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13167		u32 nic_cfg, led_cfg;
13168		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13169		int eeprom_phy_serdes = 0;
13170
13171		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13172		tp->nic_sram_data_cfg = nic_cfg;
13173
13174		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13175		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13176		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13177		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13178		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13179		    (ver > 0) && (ver < 0x100))
13180			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13181
13182		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13183			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13184
13185		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13186		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13187			eeprom_phy_serdes = 1;
13188
13189		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13190		if (nic_phy_id != 0) {
13191			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13192			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13193
13194			eeprom_phy_id  = (id1 >> 16) << 10;
13195			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13196			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13197		} else
13198			eeprom_phy_id = 0;
13199
13200		tp->phy_id = eeprom_phy_id;
13201		if (eeprom_phy_serdes) {
13202			if (!tg3_flag(tp, 5705_PLUS))
13203				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13204			else
13205				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13206		}
13207
13208		if (tg3_flag(tp, 5750_PLUS))
13209			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13210				    SHASTA_EXT_LED_MODE_MASK);
13211		else
13212			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13213
13214		switch (led_cfg) {
13215		default:
13216		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13217			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13218			break;
13219
13220		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13221			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13222			break;
13223
13224		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13225			tp->led_ctrl = LED_CTRL_MODE_MAC;
13226
13227			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
13228			 * read on some older 5700/5701 bootcode.
13229			 */
13230			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13231			    ASIC_REV_5700 ||
13232			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13233			    ASIC_REV_5701)
13234				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13235
13236			break;
13237
13238		case SHASTA_EXT_LED_SHARED:
13239			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13240			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13241			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13242				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13243						 LED_CTRL_MODE_PHY_2);
13244			break;
13245
13246		case SHASTA_EXT_LED_MAC:
13247			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13248			break;
13249
13250		case SHASTA_EXT_LED_COMBO:
13251			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13252			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13253				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13254						 LED_CTRL_MODE_PHY_2);
13255			break;
13256
13257		}
13258
13259		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13260		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13261		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13262			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13263
13264		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13265			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13266
13267		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13268			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13269			if ((tp->pdev->subsystem_vendor ==
13270			     PCI_VENDOR_ID_ARIMA) &&
13271			    (tp->pdev->subsystem_device == 0x205a ||
13272			     tp->pdev->subsystem_device == 0x2063))
13273				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13274		} else {
13275			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13276			tg3_flag_set(tp, IS_NIC);
13277		}
13278
13279		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13280			tg3_flag_set(tp, ENABLE_ASF);
13281			if (tg3_flag(tp, 5750_PLUS))
13282				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13283		}
13284
13285		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13286		    tg3_flag(tp, 5750_PLUS))
13287			tg3_flag_set(tp, ENABLE_APE);
13288
13289		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13290		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13291			tg3_flag_clear(tp, WOL_CAP);
13292
13293		if (tg3_flag(tp, WOL_CAP) &&
13294		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13295			tg3_flag_set(tp, WOL_ENABLE);
13296			device_set_wakeup_enable(&tp->pdev->dev, true);
13297		}
13298
13299		if (cfg2 & (1 << 17))
13300			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13301
13302		/* serdes signal pre-emphasis in register 0x590 set by */
13303		/* bootcode if bit 18 is set */
13304		if (cfg2 & (1 << 18))
13305			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13306
13307		if ((tg3_flag(tp, 57765_PLUS) ||
13308		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13309		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13310		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13311			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13312
13313		if (tg3_flag(tp, PCI_EXPRESS) &&
13314		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13315		    !tg3_flag(tp, 57765_PLUS)) {
13316			u32 cfg3;
13317
13318			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13319			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13320				tg3_flag_set(tp, ASPM_WORKAROUND);
13321		}
13322
13323		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13324			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13325		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13326			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13327		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13328			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13329	}
13330done:
13331	if (tg3_flag(tp, WOL_CAP))
13332		device_set_wakeup_enable(&tp->pdev->dev,
13333					 tg3_flag(tp, WOL_ENABLE));
13334	else
13335		device_set_wakeup_capable(&tp->pdev->dev, false);
13336}
13337
13338static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13339{
13340	int i;
13341	u32 val;
13342
13343	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13344	tw32(OTP_CTRL, cmd);
13345
13346	/* Wait for up to 1 ms for command to execute. */
13347	for (i = 0; i < 100; i++) {
13348		val = tr32(OTP_STATUS);
13349		if (val & OTP_STATUS_CMD_DONE)
13350			break;
13351		udelay(10);
13352	}
13353
13354	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13355}
13356
13357/* Read the gphy configuration from the OTP region of the chip.  The gphy
13358 * configuration is a 32-bit value that straddles the alignment boundary.
13359 * We do two 32-bit reads and then shift and merge the results.
13360 */
13361static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13362{
13363	u32 bhalf_otp, thalf_otp;
13364
13365	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13366
13367	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13368		return 0;
13369
13370	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13371
13372	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13373		return 0;
13374
13375	thalf_otp = tr32(OTP_READ_DATA);
13376
13377	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13378
13379	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13380		return 0;
13381
13382	bhalf_otp = tr32(OTP_READ_DATA);
13383
13384	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13385}
13386
13387static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13388{
13389	u32 adv = ADVERTISED_Autoneg;
13390
13391	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13392		adv |= ADVERTISED_1000baseT_Half |
13393		       ADVERTISED_1000baseT_Full;
13394
13395	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13396		adv |= ADVERTISED_100baseT_Half |
13397		       ADVERTISED_100baseT_Full |
13398		       ADVERTISED_10baseT_Half |
13399		       ADVERTISED_10baseT_Full |
13400		       ADVERTISED_TP;
13401	else
13402		adv |= ADVERTISED_FIBRE;
13403
13404	tp->link_config.advertising = adv;
13405	tp->link_config.speed = SPEED_UNKNOWN;
13406	tp->link_config.duplex = DUPLEX_UNKNOWN;
13407	tp->link_config.autoneg = AUTONEG_ENABLE;
13408	tp->link_config.active_speed = SPEED_UNKNOWN;
13409	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13410
13411	tp->old_link = -1;
13412}
13413
13414static int __devinit tg3_phy_probe(struct tg3 *tp)
13415{
13416	u32 hw_phy_id_1, hw_phy_id_2;
13417	u32 hw_phy_id, hw_phy_id_masked;
13418	int err;
13419
13420	/* flow control autonegotiation is default behavior */
13421	tg3_flag_set(tp, PAUSE_AUTONEG);
13422	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13423
13424	if (tg3_flag(tp, USE_PHYLIB))
13425		return tg3_phy_init(tp);
13426
13427	/* Reading the PHY ID register can conflict with ASF
13428	 * firmware access to the PHY hardware.
13429	 */
13430	err = 0;
13431	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13432		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13433	} else {
13434		/* Now read the physical PHY_ID from the chip and verify
13435		 * that it is sane.  If it doesn't look good, we fall back
13436		 * to either the hard-coded table based PHY_ID and failing
13437		 * that the value found in the eeprom area.
13438		 */
13439		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13440		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13441
13442		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13443		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13444		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13445
13446		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13447	}
13448
13449	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13450		tp->phy_id = hw_phy_id;
13451		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13452			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13453		else
13454			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13455	} else {
13456		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13457			/* Do nothing, phy ID already set up in
13458			 * tg3_get_eeprom_hw_cfg().
13459			 */
13460		} else {
13461			struct subsys_tbl_ent *p;
13462
13463			/* No eeprom signature?  Try the hardcoded
13464			 * subsys device table.
13465			 */
13466			p = tg3_lookup_by_subsys(tp);
13467			if (!p)
13468				return -ENODEV;
13469
13470			tp->phy_id = p->phy_id;
13471			if (!tp->phy_id ||
13472			    tp->phy_id == TG3_PHY_ID_BCM8002)
13473				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13474		}
13475	}
13476
13477	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13478	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13479	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13480	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13481	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13482	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13483	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13484		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13485
13486	tg3_phy_init_link_config(tp);
13487
13488	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13489	    !tg3_flag(tp, ENABLE_APE) &&
13490	    !tg3_flag(tp, ENABLE_ASF)) {
13491		u32 bmsr, dummy;
13492
13493		tg3_readphy(tp, MII_BMSR, &bmsr);
13494		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13495		    (bmsr & BMSR_LSTATUS))
13496			goto skip_phy_reset;
13497
13498		err = tg3_phy_reset(tp);
13499		if (err)
13500			return err;
13501
13502		tg3_phy_set_wirespeed(tp);
13503
13504		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13505			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13506					    tp->link_config.flowctrl);
13507
13508			tg3_writephy(tp, MII_BMCR,
13509				     BMCR_ANENABLE | BMCR_ANRESTART);
13510		}
13511	}
13512
13513skip_phy_reset:
13514	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13515		err = tg3_init_5401phy_dsp(tp);
13516		if (err)
13517			return err;
13518
13519		err = tg3_init_5401phy_dsp(tp);
13520	}
13521
13522	return err;
13523}
13524
13525static void __devinit tg3_read_vpd(struct tg3 *tp)
13526{
13527	u8 *vpd_data;
13528	unsigned int block_end, rosize, len;
13529	u32 vpdlen;
13530	int j, i = 0;
13531
13532	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13533	if (!vpd_data)
13534		goto out_no_vpd;
13535
13536	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13537	if (i < 0)
13538		goto out_not_found;
13539
13540	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13541	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13542	i += PCI_VPD_LRDT_TAG_SIZE;
13543
13544	if (block_end > vpdlen)
13545		goto out_not_found;
13546
13547	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13548				      PCI_VPD_RO_KEYWORD_MFR_ID);
13549	if (j > 0) {
13550		len = pci_vpd_info_field_size(&vpd_data[j]);
13551
13552		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13553		if (j + len > block_end || len != 4 ||
13554		    memcmp(&vpd_data[j], "1028", 4))
13555			goto partno;
13556
13557		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13558					      PCI_VPD_RO_KEYWORD_VENDOR0);
13559		if (j < 0)
13560			goto partno;
13561
13562		len = pci_vpd_info_field_size(&vpd_data[j]);
13563
13564		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13565		if (j + len > block_end)
13566			goto partno;
13567
13568		memcpy(tp->fw_ver, &vpd_data[j], len);
13569		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13570	}
13571
13572partno:
13573	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13574				      PCI_VPD_RO_KEYWORD_PARTNO);
13575	if (i < 0)
13576		goto out_not_found;
13577
13578	len = pci_vpd_info_field_size(&vpd_data[i]);
13579
13580	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13581	if (len > TG3_BPN_SIZE ||
13582	    (len + i) > vpdlen)
13583		goto out_not_found;
13584
13585	memcpy(tp->board_part_number, &vpd_data[i], len);
13586
13587out_not_found:
13588	kfree(vpd_data);
13589	if (tp->board_part_number[0])
13590		return;
13591
13592out_no_vpd:
13593	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13594		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13595			strcpy(tp->board_part_number, "BCM5717");
13596		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13597			strcpy(tp->board_part_number, "BCM5718");
13598		else
13599			goto nomatch;
13600	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13601		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13602			strcpy(tp->board_part_number, "BCM57780");
13603		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13604			strcpy(tp->board_part_number, "BCM57760");
13605		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13606			strcpy(tp->board_part_number, "BCM57790");
13607		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13608			strcpy(tp->board_part_number, "BCM57788");
13609		else
13610			goto nomatch;
13611	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13612		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13613			strcpy(tp->board_part_number, "BCM57761");
13614		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13615			strcpy(tp->board_part_number, "BCM57765");
13616		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13617			strcpy(tp->board_part_number, "BCM57781");
13618		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13619			strcpy(tp->board_part_number, "BCM57785");
13620		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13621			strcpy(tp->board_part_number, "BCM57791");
13622		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13623			strcpy(tp->board_part_number, "BCM57795");
13624		else
13625			goto nomatch;
13626	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13627		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13628			strcpy(tp->board_part_number, "BCM57762");
13629		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13630			strcpy(tp->board_part_number, "BCM57766");
13631		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13632			strcpy(tp->board_part_number, "BCM57782");
13633		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13634			strcpy(tp->board_part_number, "BCM57786");
13635		else
13636			goto nomatch;
13637	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13638		strcpy(tp->board_part_number, "BCM95906");
13639	} else {
13640nomatch:
13641		strcpy(tp->board_part_number, "none");
13642	}
13643}
13644
13645static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13646{
13647	u32 val;
13648
13649	if (tg3_nvram_read(tp, offset, &val) ||
13650	    (val & 0xfc000000) != 0x0c000000 ||
13651	    tg3_nvram_read(tp, offset + 4, &val) ||
13652	    val != 0)
13653		return 0;
13654
13655	return 1;
13656}
13657
13658static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13659{
13660	u32 val, offset, start, ver_offset;
13661	int i, dst_off;
13662	bool newver = false;
13663
13664	if (tg3_nvram_read(tp, 0xc, &offset) ||
13665	    tg3_nvram_read(tp, 0x4, &start))
13666		return;
13667
13668	offset = tg3_nvram_logical_addr(tp, offset);
13669
13670	if (tg3_nvram_read(tp, offset, &val))
13671		return;
13672
13673	if ((val & 0xfc000000) == 0x0c000000) {
13674		if (tg3_nvram_read(tp, offset + 4, &val))
13675			return;
13676
13677		if (val == 0)
13678			newver = true;
13679	}
13680
13681	dst_off = strlen(tp->fw_ver);
13682
13683	if (newver) {
13684		if (TG3_VER_SIZE - dst_off < 16 ||
13685		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13686			return;
13687
13688		offset = offset + ver_offset - start;
13689		for (i = 0; i < 16; i += 4) {
13690			__be32 v;
13691			if (tg3_nvram_read_be32(tp, offset + i, &v))
13692				return;
13693
13694			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13695		}
13696	} else {
13697		u32 major, minor;
13698
13699		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13700			return;
13701
13702		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13703			TG3_NVM_BCVER_MAJSFT;
13704		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13705		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13706			 "v%d.%02d", major, minor);
13707	}
13708}
13709
13710static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13711{
13712	u32 val, major, minor;
13713
13714	/* Use native endian representation */
13715	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13716		return;
13717
13718	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13719		TG3_NVM_HWSB_CFG1_MAJSFT;
13720	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13721		TG3_NVM_HWSB_CFG1_MINSFT;
13722
13723	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13724}
13725
13726static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13727{
13728	u32 offset, major, minor, build;
13729
13730	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13731
13732	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13733		return;
13734
13735	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13736	case TG3_EEPROM_SB_REVISION_0:
13737		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13738		break;
13739	case TG3_EEPROM_SB_REVISION_2:
13740		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13741		break;
13742	case TG3_EEPROM_SB_REVISION_3:
13743		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13744		break;
13745	case TG3_EEPROM_SB_REVISION_4:
13746		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13747		break;
13748	case TG3_EEPROM_SB_REVISION_5:
13749		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13750		break;
13751	case TG3_EEPROM_SB_REVISION_6:
13752		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13753		break;
13754	default:
13755		return;
13756	}
13757
13758	if (tg3_nvram_read(tp, offset, &val))
13759		return;
13760
13761	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13762		TG3_EEPROM_SB_EDH_BLD_SHFT;
13763	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13764		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13765	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13766
13767	if (minor > 99 || build > 26)
13768		return;
13769
13770	offset = strlen(tp->fw_ver);
13771	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13772		 " v%d.%02d", major, minor);
13773
13774	if (build > 0) {
13775		offset = strlen(tp->fw_ver);
13776		if (offset < TG3_VER_SIZE - 1)
13777			tp->fw_ver[offset] = 'a' + build - 1;
13778	}
13779}
13780
13781static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13782{
13783	u32 val, offset, start;
13784	int i, vlen;
13785
13786	for (offset = TG3_NVM_DIR_START;
13787	     offset < TG3_NVM_DIR_END;
13788	     offset += TG3_NVM_DIRENT_SIZE) {
13789		if (tg3_nvram_read(tp, offset, &val))
13790			return;
13791
13792		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13793			break;
13794	}
13795
13796	if (offset == TG3_NVM_DIR_END)
13797		return;
13798
13799	if (!tg3_flag(tp, 5705_PLUS))
13800		start = 0x08000000;
13801	else if (tg3_nvram_read(tp, offset - 4, &start))
13802		return;
13803
13804	if (tg3_nvram_read(tp, offset + 4, &offset) ||
13805	    !tg3_fw_img_is_valid(tp, offset) ||
13806	    tg3_nvram_read(tp, offset + 8, &val))
13807		return;
13808
13809	offset += val - start;
13810
13811	vlen = strlen(tp->fw_ver);
13812
13813	tp->fw_ver[vlen++] = ',';
13814	tp->fw_ver[vlen++] = ' ';
13815
13816	for (i = 0; i < 4; i++) {
13817		__be32 v;
13818		if (tg3_nvram_read_be32(tp, offset, &v))
13819			return;
13820
13821		offset += sizeof(v);
13822
13823		if (vlen > TG3_VER_SIZE - sizeof(v)) {
13824			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13825			break;
13826		}
13827
13828		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13829		vlen += sizeof(v);
13830	}
13831}
13832
13833static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13834{
13835	int vlen;
13836	u32 apedata;
13837	char *fwtype;
13838
13839	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13840		return;
13841
13842	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13843	if (apedata != APE_SEG_SIG_MAGIC)
13844		return;
13845
13846	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13847	if (!(apedata & APE_FW_STATUS_READY))
13848		return;
13849
13850	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13851
13852	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13853		tg3_flag_set(tp, APE_HAS_NCSI);
13854		fwtype = "NCSI";
13855	} else {
13856		fwtype = "DASH";
13857	}
13858
13859	vlen = strlen(tp->fw_ver);
13860
13861	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13862		 fwtype,
13863		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13864		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13865		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13866		 (apedata & APE_FW_VERSION_BLDMSK));
13867}
13868
13869static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13870{
13871	u32 val;
13872	bool vpd_vers = false;
13873
13874	if (tp->fw_ver[0] != 0)
13875		vpd_vers = true;
13876
13877	if (tg3_flag(tp, NO_NVRAM)) {
13878		strcat(tp->fw_ver, "sb");
13879		return;
13880	}
13881
13882	if (tg3_nvram_read(tp, 0, &val))
13883		return;
13884
13885	if (val == TG3_EEPROM_MAGIC)
13886		tg3_read_bc_ver(tp);
13887	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13888		tg3_read_sb_ver(tp, val);
13889	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13890		tg3_read_hwsb_ver(tp);
13891	else
13892		return;
13893
13894	if (vpd_vers)
13895		goto done;
13896
13897	if (tg3_flag(tp, ENABLE_APE)) {
13898		if (tg3_flag(tp, ENABLE_ASF))
13899			tg3_read_dash_ver(tp);
13900	} else if (tg3_flag(tp, ENABLE_ASF)) {
13901		tg3_read_mgmtfw_ver(tp);
13902	}
13903
13904done:
13905	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13906}
13907
13908static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13909{
13910	if (tg3_flag(tp, LRG_PROD_RING_CAP))
13911		return TG3_RX_RET_MAX_SIZE_5717;
13912	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13913		return TG3_RX_RET_MAX_SIZE_5700;
13914	else
13915		return TG3_RX_RET_MAX_SIZE_5705;
13916}
13917
13918static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13919	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13920	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13921	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13922	{ },
13923};
13924
13925static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13926{
13927	struct pci_dev *peer;
13928	unsigned int func, devnr = tp->pdev->devfn & ~7;
13929
13930	for (func = 0; func < 8; func++) {
13931		peer = pci_get_slot(tp->pdev->bus, devnr | func);
13932		if (peer && peer != tp->pdev)
13933			break;
13934		pci_dev_put(peer);
13935	}
13936	/* 5704 can be configured in single-port mode, set peer to
13937	 * tp->pdev in that case.
13938	 */
13939	if (!peer) {
13940		peer = tp->pdev;
13941		return peer;
13942	}
13943
13944	/*
13945	 * We don't need to keep the refcount elevated; there's no way
13946	 * to remove one half of this device without removing the other
13947	 */
13948	pci_dev_put(peer);
13949
13950	return peer;
13951}
13952
13953static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13954{
13955	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13956	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13957		u32 reg;
13958
13959		/* All devices that use the alternate
13960		 * ASIC REV location have a CPMU.
13961		 */
13962		tg3_flag_set(tp, CPMU_PRESENT);
13963
13964		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13965		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13966		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13967		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13968			reg = TG3PCI_GEN2_PRODID_ASICREV;
13969		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13970			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13971			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13972			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13973			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13974			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13975			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13976			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13977			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13978			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13979			reg = TG3PCI_GEN15_PRODID_ASICREV;
13980		else
13981			reg = TG3PCI_PRODID_ASICREV;
13982
13983		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13984	}
13985
13986	/* Wrong chip ID in 5752 A0. This code can be removed later
13987	 * as A0 is not in production.
13988	 */
13989	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13990		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13991
13992	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13993	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13994	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13995		tg3_flag_set(tp, 5717_PLUS);
13996
13997	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13998	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13999		tg3_flag_set(tp, 57765_CLASS);
14000
14001	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14002		tg3_flag_set(tp, 57765_PLUS);
14003
14004	/* Intentionally exclude ASIC_REV_5906 */
14005	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14006	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14007	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14008	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14009	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14010	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14011	    tg3_flag(tp, 57765_PLUS))
14012		tg3_flag_set(tp, 5755_PLUS);
14013
14014	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14015	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14016		tg3_flag_set(tp, 5780_CLASS);
14017
14018	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14019	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14020	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14021	    tg3_flag(tp, 5755_PLUS) ||
14022	    tg3_flag(tp, 5780_CLASS))
14023		tg3_flag_set(tp, 5750_PLUS);
14024
14025	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14026	    tg3_flag(tp, 5750_PLUS))
14027		tg3_flag_set(tp, 5705_PLUS);
14028}
14029
14030static int __devinit tg3_get_invariants(struct tg3 *tp)
14031{
14032	u32 misc_ctrl_reg;
14033	u32 pci_state_reg, grc_misc_cfg;
14034	u32 val;
14035	u16 pci_cmd;
14036	int err;
14037
14038	/* Force memory write invalidate off.  If we leave it on,
14039	 * then on 5700_BX chips we have to enable a workaround.
14040	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14041	 * to match the cacheline size.  The Broadcom driver have this
14042	 * workaround but turns MWI off all the times so never uses
14043	 * it.  This seems to suggest that the workaround is insufficient.
14044	 */
14045	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14046	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14047	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14048
14049	/* Important! -- Make sure register accesses are byteswapped
14050	 * correctly.  Also, for those chips that require it, make
14051	 * sure that indirect register accesses are enabled before
14052	 * the first operation.
14053	 */
14054	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14055			      &misc_ctrl_reg);
14056	tp->misc_host_ctrl |= (misc_ctrl_reg &
14057			       MISC_HOST_CTRL_CHIPREV);
14058	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14059			       tp->misc_host_ctrl);
14060
14061	tg3_detect_asic_rev(tp, misc_ctrl_reg);
14062
14063	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14064	 * we need to disable memory and use config. cycles
14065	 * only to access all registers. The 5702/03 chips
14066	 * can mistakenly decode the special cycles from the
14067	 * ICH chipsets as memory write cycles, causing corruption
14068	 * of register and memory space. Only certain ICH bridges
14069	 * will drive special cycles with non-zero data during the
14070	 * address phase which can fall within the 5703's address
14071	 * range. This is not an ICH bug as the PCI spec allows
14072	 * non-zero address during special cycles. However, only
14073	 * these ICH bridges are known to drive non-zero addresses
14074	 * during special cycles.
14075	 *
14076	 * Since special cycles do not cross PCI bridges, we only
14077	 * enable this workaround if the 5703 is on the secondary
14078	 * bus of these ICH bridges.
14079	 */
14080	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14081	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14082		static struct tg3_dev_id {
14083			u32	vendor;
14084			u32	device;
14085			u32	rev;
14086		} ich_chipsets[] = {
14087			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14088			  PCI_ANY_ID },
14089			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14090			  PCI_ANY_ID },
14091			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14092			  0xa },
14093			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14094			  PCI_ANY_ID },
14095			{ },
14096		};
14097		struct tg3_dev_id *pci_id = &ich_chipsets[0];
14098		struct pci_dev *bridge = NULL;
14099
14100		while (pci_id->vendor != 0) {
14101			bridge = pci_get_device(pci_id->vendor, pci_id->device,
14102						bridge);
14103			if (!bridge) {
14104				pci_id++;
14105				continue;
14106			}
14107			if (pci_id->rev != PCI_ANY_ID) {
14108				if (bridge->revision > pci_id->rev)
14109					continue;
14110			}
14111			if (bridge->subordinate &&
14112			    (bridge->subordinate->number ==
14113			     tp->pdev->bus->number)) {
14114				tg3_flag_set(tp, ICH_WORKAROUND);
14115				pci_dev_put(bridge);
14116				break;
14117			}
14118		}
14119	}
14120
14121	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14122		static struct tg3_dev_id {
14123			u32	vendor;
14124			u32	device;
14125		} bridge_chipsets[] = {
14126			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14127			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14128			{ },
14129		};
14130		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14131		struct pci_dev *bridge = NULL;
14132
14133		while (pci_id->vendor != 0) {
14134			bridge = pci_get_device(pci_id->vendor,
14135						pci_id->device,
14136						bridge);
14137			if (!bridge) {
14138				pci_id++;
14139				continue;
14140			}
14141			if (bridge->subordinate &&
14142			    (bridge->subordinate->number <=
14143			     tp->pdev->bus->number) &&
14144			    (bridge->subordinate->subordinate >=
14145			     tp->pdev->bus->number)) {
14146				tg3_flag_set(tp, 5701_DMA_BUG);
14147				pci_dev_put(bridge);
14148				break;
14149			}
14150		}
14151	}
14152
14153	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
14154	 * DMA addresses > 40-bit. This bridge may have other additional
14155	 * 57xx devices behind it in some 4-port NIC designs for example.
14156	 * Any tg3 device found behind the bridge will also need the 40-bit
14157	 * DMA workaround.
14158	 */
14159	if (tg3_flag(tp, 5780_CLASS)) {
14160		tg3_flag_set(tp, 40BIT_DMA_BUG);
14161		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14162	} else {
14163		struct pci_dev *bridge = NULL;
14164
14165		do {
14166			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14167						PCI_DEVICE_ID_SERVERWORKS_EPB,
14168						bridge);
14169			if (bridge && bridge->subordinate &&
14170			    (bridge->subordinate->number <=
14171			     tp->pdev->bus->number) &&
14172			    (bridge->subordinate->subordinate >=
14173			     tp->pdev->bus->number)) {
14174				tg3_flag_set(tp, 40BIT_DMA_BUG);
14175				pci_dev_put(bridge);
14176				break;
14177			}
14178		} while (bridge);
14179	}
14180
14181	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14182	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14183		tp->pdev_peer = tg3_find_peer(tp);
14184
14185	/* Determine TSO capabilities */
14186	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14187		; /* Do nothing. HW bug. */
14188	else if (tg3_flag(tp, 57765_PLUS))
14189		tg3_flag_set(tp, HW_TSO_3);
14190	else if (tg3_flag(tp, 5755_PLUS) ||
14191		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14192		tg3_flag_set(tp, HW_TSO_2);
14193	else if (tg3_flag(tp, 5750_PLUS)) {
14194		tg3_flag_set(tp, HW_TSO_1);
14195		tg3_flag_set(tp, TSO_BUG);
14196		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14197		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14198			tg3_flag_clear(tp, TSO_BUG);
14199	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14200		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14201		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14202			tg3_flag_set(tp, TSO_BUG);
14203		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14204			tp->fw_needed = FIRMWARE_TG3TSO5;
14205		else
14206			tp->fw_needed = FIRMWARE_TG3TSO;
14207	}
14208
14209	/* Selectively allow TSO based on operating conditions */
14210	if (tg3_flag(tp, HW_TSO_1) ||
14211	    tg3_flag(tp, HW_TSO_2) ||
14212	    tg3_flag(tp, HW_TSO_3) ||
14213	    tp->fw_needed) {
14214		/* For firmware TSO, assume ASF is disabled.
14215		 * We'll disable TSO later if we discover ASF
14216		 * is enabled in tg3_get_eeprom_hw_cfg().
14217		 */
14218		tg3_flag_set(tp, TSO_CAPABLE);
14219	} else {
14220		tg3_flag_clear(tp, TSO_CAPABLE);
14221		tg3_flag_clear(tp, TSO_BUG);
14222		tp->fw_needed = NULL;
14223	}
14224
14225	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14226		tp->fw_needed = FIRMWARE_TG3;
14227
14228	tp->irq_max = 1;
14229
14230	if (tg3_flag(tp, 5750_PLUS)) {
14231		tg3_flag_set(tp, SUPPORT_MSI);
14232		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14233		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14234		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14235		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14236		     tp->pdev_peer == tp->pdev))
14237			tg3_flag_clear(tp, SUPPORT_MSI);
14238
14239		if (tg3_flag(tp, 5755_PLUS) ||
14240		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14241			tg3_flag_set(tp, 1SHOT_MSI);
14242		}
14243
14244		if (tg3_flag(tp, 57765_PLUS)) {
14245			tg3_flag_set(tp, SUPPORT_MSIX);
14246			tp->irq_max = TG3_IRQ_MAX_VECS;
14247			tg3_rss_init_dflt_indir_tbl(tp);
14248		}
14249	}
14250
14251	if (tg3_flag(tp, 5755_PLUS) ||
14252	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14253		tg3_flag_set(tp, SHORT_DMA_BUG);
14254
14255	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14256		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14257
14258	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14259	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14260	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14261		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14262
14263	if (tg3_flag(tp, 57765_PLUS) &&
14264	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14265		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14266
14267	if (!tg3_flag(tp, 5705_PLUS) ||
14268	    tg3_flag(tp, 5780_CLASS) ||
14269	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14270		tg3_flag_set(tp, JUMBO_CAPABLE);
14271
14272	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14273			      &pci_state_reg);
14274
14275	if (pci_is_pcie(tp->pdev)) {
14276		u16 lnkctl;
14277
14278		tg3_flag_set(tp, PCI_EXPRESS);
14279
14280		pci_read_config_word(tp->pdev,
14281				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14282				     &lnkctl);
14283		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14284			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14285			    ASIC_REV_5906) {
14286				tg3_flag_clear(tp, HW_TSO_2);
14287				tg3_flag_clear(tp, TSO_CAPABLE);
14288			}
14289			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14290			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14291			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14292			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14293				tg3_flag_set(tp, CLKREQ_BUG);
14294		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14295			tg3_flag_set(tp, L1PLLPD_EN);
14296		}
14297	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14298		/* BCM5785 devices are effectively PCIe devices, and should
14299		 * follow PCIe codepaths, but do not have a PCIe capabilities
14300		 * section.
14301		 */
14302		tg3_flag_set(tp, PCI_EXPRESS);
14303	} else if (!tg3_flag(tp, 5705_PLUS) ||
14304		   tg3_flag(tp, 5780_CLASS)) {
14305		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14306		if (!tp->pcix_cap) {
14307			dev_err(&tp->pdev->dev,
14308				"Cannot find PCI-X capability, aborting\n");
14309			return -EIO;
14310		}
14311
14312		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14313			tg3_flag_set(tp, PCIX_MODE);
14314	}
14315
14316	/* If we have an AMD 762 or VIA K8T800 chipset, write
14317	 * reordering to the mailbox registers done by the host
14318	 * controller can cause major troubles.  We read back from
14319	 * every mailbox register write to force the writes to be
14320	 * posted to the chip in order.
14321	 */
14322	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14323	    !tg3_flag(tp, PCI_EXPRESS))
14324		tg3_flag_set(tp, MBOX_WRITE_REORDER);
14325
14326	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14327			     &tp->pci_cacheline_sz);
14328	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14329			     &tp->pci_lat_timer);
14330	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14331	    tp->pci_lat_timer < 64) {
14332		tp->pci_lat_timer = 64;
14333		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14334				      tp->pci_lat_timer);
14335	}
14336
14337	/* Important! -- It is critical that the PCI-X hw workaround
14338	 * situation is decided before the first MMIO register access.
14339	 */
14340	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14341		/* 5700 BX chips need to have their TX producer index
14342		 * mailboxes written twice to workaround a bug.
14343		 */
14344		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14345
14346		/* If we are in PCI-X mode, enable register write workaround.
14347		 *
14348		 * The workaround is to use indirect register accesses
14349		 * for all chip writes not to mailbox registers.
14350		 */
14351		if (tg3_flag(tp, PCIX_MODE)) {
14352			u32 pm_reg;
14353
14354			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14355
14356			/* The chip can have it's power management PCI config
14357			 * space registers clobbered due to this bug.
14358			 * So explicitly force the chip into D0 here.
14359			 */
14360			pci_read_config_dword(tp->pdev,
14361					      tp->pm_cap + PCI_PM_CTRL,
14362					      &pm_reg);
14363			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14364			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14365			pci_write_config_dword(tp->pdev,
14366					       tp->pm_cap + PCI_PM_CTRL,
14367					       pm_reg);
14368
14369			/* Also, force SERR#/PERR# in PCI command. */
14370			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14371			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14372			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14373		}
14374	}
14375
14376	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14377		tg3_flag_set(tp, PCI_HIGH_SPEED);
14378	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14379		tg3_flag_set(tp, PCI_32BIT);
14380
14381	/* Chip-specific fixup from Broadcom driver */
14382	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14383	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14384		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14385		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14386	}
14387
14388	/* Default fast path register access methods */
14389	tp->read32 = tg3_read32;
14390	tp->write32 = tg3_write32;
14391	tp->read32_mbox = tg3_read32;
14392	tp->write32_mbox = tg3_write32;
14393	tp->write32_tx_mbox = tg3_write32;
14394	tp->write32_rx_mbox = tg3_write32;
14395
14396	/* Various workaround register access methods */
14397	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14398		tp->write32 = tg3_write_indirect_reg32;
14399	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14400		 (tg3_flag(tp, PCI_EXPRESS) &&
14401		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14402		/*
14403		 * Back to back register writes can cause problems on these
14404		 * chips, the workaround is to read back all reg writes
14405		 * except those to mailbox regs.
14406		 *
14407		 * See tg3_write_indirect_reg32().
14408		 */
14409		tp->write32 = tg3_write_flush_reg32;
14410	}
14411
14412	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14413		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14414		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14415			tp->write32_rx_mbox = tg3_write_flush_reg32;
14416	}
14417
14418	if (tg3_flag(tp, ICH_WORKAROUND)) {
14419		tp->read32 = tg3_read_indirect_reg32;
14420		tp->write32 = tg3_write_indirect_reg32;
14421		tp->read32_mbox = tg3_read_indirect_mbox;
14422		tp->write32_mbox = tg3_write_indirect_mbox;
14423		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14424		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14425
14426		iounmap(tp->regs);
14427		tp->regs = NULL;
14428
14429		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14430		pci_cmd &= ~PCI_COMMAND_MEMORY;
14431		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14432	}
14433	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14434		tp->read32_mbox = tg3_read32_mbox_5906;
14435		tp->write32_mbox = tg3_write32_mbox_5906;
14436		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14437		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14438	}
14439
14440	if (tp->write32 == tg3_write_indirect_reg32 ||
14441	    (tg3_flag(tp, PCIX_MODE) &&
14442	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14443	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14444		tg3_flag_set(tp, SRAM_USE_CONFIG);
14445
14446	/* The memory arbiter has to be enabled in order for SRAM accesses
14447	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14448	 * sure it is enabled, but other entities such as system netboot
14449	 * code might disable it.
14450	 */
14451	val = tr32(MEMARB_MODE);
14452	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14453
14454	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14455	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14456	    tg3_flag(tp, 5780_CLASS)) {
14457		if (tg3_flag(tp, PCIX_MODE)) {
14458			pci_read_config_dword(tp->pdev,
14459					      tp->pcix_cap + PCI_X_STATUS,
14460					      &val);
14461			tp->pci_fn = val & 0x7;
14462		}
14463	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14464		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14465		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14466		    NIC_SRAM_CPMUSTAT_SIG) {
14467			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14468			tp->pci_fn = tp->pci_fn ? 1 : 0;
14469		}
14470	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14471		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14472		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14473		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14474		    NIC_SRAM_CPMUSTAT_SIG) {
14475			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14476				     TG3_CPMU_STATUS_FSHFT_5719;
14477		}
14478	}
14479
14480	/* Get eeprom hw config before calling tg3_set_power_state().
14481	 * In particular, the TG3_FLAG_IS_NIC flag must be
14482	 * determined before calling tg3_set_power_state() so that
14483	 * we know whether or not to switch out of Vaux power.
14484	 * When the flag is set, it means that GPIO1 is used for eeprom
14485	 * write protect and also implies that it is a LOM where GPIOs
14486	 * are not used to switch power.
14487	 */
14488	tg3_get_eeprom_hw_cfg(tp);
14489
14490	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14491		tg3_flag_clear(tp, TSO_CAPABLE);
14492		tg3_flag_clear(tp, TSO_BUG);
14493		tp->fw_needed = NULL;
14494	}
14495
14496	if (tg3_flag(tp, ENABLE_APE)) {
14497		/* Allow reads and writes to the
14498		 * APE register and memory space.
14499		 */
14500		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14501				 PCISTATE_ALLOW_APE_SHMEM_WR |
14502				 PCISTATE_ALLOW_APE_PSPACE_WR;
14503		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14504				       pci_state_reg);
14505
14506		tg3_ape_lock_init(tp);
14507	}
14508
14509	/* Set up tp->grc_local_ctrl before calling
14510	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14511	 * will bring 5700's external PHY out of reset.
14512	 * It is also used as eeprom write protect on LOMs.
14513	 */
14514	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14515	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14516	    tg3_flag(tp, EEPROM_WRITE_PROT))
14517		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14518				       GRC_LCLCTRL_GPIO_OUTPUT1);
14519	/* Unused GPIO3 must be driven as output on 5752 because there
14520	 * are no pull-up resistors on unused GPIO pins.
14521	 */
14522	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14523		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14524
14525	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14526	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14527	    tg3_flag(tp, 57765_CLASS))
14528		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14529
14530	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14531	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14532		/* Turn off the debug UART. */
14533		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14534		if (tg3_flag(tp, IS_NIC))
14535			/* Keep VMain power. */
14536			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14537					      GRC_LCLCTRL_GPIO_OUTPUT0;
14538	}
14539
14540	/* Switch out of Vaux if it is a NIC */
14541	tg3_pwrsrc_switch_to_vmain(tp);
14542
14543	/* Derive initial jumbo mode from MTU assigned in
14544	 * ether_setup() via the alloc_etherdev() call
14545	 */
14546	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14547		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14548
14549	/* Determine WakeOnLan speed to use. */
14550	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14551	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14552	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14553	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14554		tg3_flag_clear(tp, WOL_SPEED_100MB);
14555	} else {
14556		tg3_flag_set(tp, WOL_SPEED_100MB);
14557	}
14558
14559	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14560		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14561
14562	/* A few boards don't want Ethernet@WireSpeed phy feature */
14563	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14564	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14565	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14566	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14567	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14568	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14569		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14570
14571	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14572	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14573		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14574	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14575		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14576
14577	if (tg3_flag(tp, 5705_PLUS) &&
14578	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14579	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14580	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14581	    !tg3_flag(tp, 57765_PLUS)) {
14582		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14583		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14584		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14585		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14586			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14587			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14588				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14589			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14590				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14591		} else
14592			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14593	}
14594
14595	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14596	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14597		tp->phy_otp = tg3_read_otp_phycfg(tp);
14598		if (tp->phy_otp == 0)
14599			tp->phy_otp = TG3_OTP_DEFAULT;
14600	}
14601
14602	if (tg3_flag(tp, CPMU_PRESENT))
14603		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14604	else
14605		tp->mi_mode = MAC_MI_MODE_BASE;
14606
14607	tp->coalesce_mode = 0;
14608	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14609	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14610		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14611
14612	/* Set these bits to enable statistics workaround. */
14613	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14614	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14615	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14616		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14617		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14618	}
14619
14620	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14621	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14622		tg3_flag_set(tp, USE_PHYLIB);
14623
14624	err = tg3_mdio_init(tp);
14625	if (err)
14626		return err;
14627
14628	/* Initialize data/descriptor byte/word swapping. */
14629	val = tr32(GRC_MODE);
14630	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14631		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14632			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14633			GRC_MODE_B2HRX_ENABLE |
14634			GRC_MODE_HTX2B_ENABLE |
14635			GRC_MODE_HOST_STACKUP);
14636	else
14637		val &= GRC_MODE_HOST_STACKUP;
14638
14639	tw32(GRC_MODE, val | tp->grc_mode);
14640
14641	tg3_switch_clocks(tp);
14642
14643	/* Clear this out for sanity. */
14644	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14645
14646	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14647			      &pci_state_reg);
14648	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14649	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14650		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14651
14652		if (chiprevid == CHIPREV_ID_5701_A0 ||
14653		    chiprevid == CHIPREV_ID_5701_B0 ||
14654		    chiprevid == CHIPREV_ID_5701_B2 ||
14655		    chiprevid == CHIPREV_ID_5701_B5) {
14656			void __iomem *sram_base;
14657
14658			/* Write some dummy words into the SRAM status block
14659			 * area, see if it reads back correctly.  If the return
14660			 * value is bad, force enable the PCIX workaround.
14661			 */
14662			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14663
14664			writel(0x00000000, sram_base);
14665			writel(0x00000000, sram_base + 4);
14666			writel(0xffffffff, sram_base + 4);
14667			if (readl(sram_base) != 0x00000000)
14668				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14669		}
14670	}
14671
14672	udelay(50);
14673	tg3_nvram_init(tp);
14674
14675	grc_misc_cfg = tr32(GRC_MISC_CFG);
14676	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14677
14678	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14679	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14680	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14681		tg3_flag_set(tp, IS_5788);
14682
14683	if (!tg3_flag(tp, IS_5788) &&
14684	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14685		tg3_flag_set(tp, TAGGED_STATUS);
14686	if (tg3_flag(tp, TAGGED_STATUS)) {
14687		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14688				      HOSTCC_MODE_CLRTICK_TXBD);
14689
14690		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14691		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14692				       tp->misc_host_ctrl);
14693	}
14694
14695	/* Preserve the APE MAC_MODE bits */
14696	if (tg3_flag(tp, ENABLE_APE))
14697		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14698	else
14699		tp->mac_mode = 0;
14700
14701	/* these are limited to 10/100 only */
14702	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14703	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14704	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14705	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14706	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14707	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14708	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14709	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14710	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14711	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14712	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14713	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14714	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14715	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14716	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14717		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14718
14719	err = tg3_phy_probe(tp);
14720	if (err) {
14721		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14722		/* ... but do not return immediately ... */
14723		tg3_mdio_fini(tp);
14724	}
14725
14726	tg3_read_vpd(tp);
14727	tg3_read_fw_ver(tp);
14728
14729	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14730		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14731	} else {
14732		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14733			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14734		else
14735			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14736	}
14737
14738	/* 5700 {AX,BX} chips have a broken status block link
14739	 * change bit implementation, so we must use the
14740	 * status register in those cases.
14741	 */
14742	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14743		tg3_flag_set(tp, USE_LINKCHG_REG);
14744	else
14745		tg3_flag_clear(tp, USE_LINKCHG_REG);
14746
14747	/* The led_ctrl is set during tg3_phy_probe, here we might
14748	 * have to force the link status polling mechanism based
14749	 * upon subsystem IDs.
14750	 */
14751	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14752	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14753	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14754		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14755		tg3_flag_set(tp, USE_LINKCHG_REG);
14756	}
14757
14758	/* For all SERDES we poll the MAC status register. */
14759	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14760		tg3_flag_set(tp, POLL_SERDES);
14761	else
14762		tg3_flag_clear(tp, POLL_SERDES);
14763
14764	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14765	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14766	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14767	    tg3_flag(tp, PCIX_MODE)) {
14768		tp->rx_offset = NET_SKB_PAD;
14769#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14770		tp->rx_copy_thresh = ~(u16)0;
14771#endif
14772	}
14773
14774	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14775	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14776	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14777
14778	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14779
14780	/* Increment the rx prod index on the rx std ring by at most
14781	 * 8 for these chips to workaround hw errata.
14782	 */
14783	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14784	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14785	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14786		tp->rx_std_max_post = 8;
14787
14788	if (tg3_flag(tp, ASPM_WORKAROUND))
14789		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14790				     PCIE_PWR_MGMT_L1_THRESH_MSK;
14791
14792	return err;
14793}
14794
14795#ifdef CONFIG_SPARC
14796static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14797{
14798	struct net_device *dev = tp->dev;
14799	struct pci_dev *pdev = tp->pdev;
14800	struct device_node *dp = pci_device_to_OF_node(pdev);
14801	const unsigned char *addr;
14802	int len;
14803
14804	addr = of_get_property(dp, "local-mac-address", &len);
14805	if (addr && len == 6) {
14806		memcpy(dev->dev_addr, addr, 6);
14807		memcpy(dev->perm_addr, dev->dev_addr, 6);
14808		return 0;
14809	}
14810	return -ENODEV;
14811}
14812
14813static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14814{
14815	struct net_device *dev = tp->dev;
14816
14817	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14818	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14819	return 0;
14820}
14821#endif
14822
14823static int __devinit tg3_get_device_address(struct tg3 *tp)
14824{
14825	struct net_device *dev = tp->dev;
14826	u32 hi, lo, mac_offset;
14827	int addr_ok = 0;
14828
14829#ifdef CONFIG_SPARC
14830	if (!tg3_get_macaddr_sparc(tp))
14831		return 0;
14832#endif
14833
14834	mac_offset = 0x7c;
14835	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14836	    tg3_flag(tp, 5780_CLASS)) {
14837		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14838			mac_offset = 0xcc;
14839		if (tg3_nvram_lock(tp))
14840			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14841		else
14842			tg3_nvram_unlock(tp);
14843	} else if (tg3_flag(tp, 5717_PLUS)) {
14844		if (tp->pci_fn & 1)
14845			mac_offset = 0xcc;
14846		if (tp->pci_fn > 1)
14847			mac_offset += 0x18c;
14848	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14849		mac_offset = 0x10;
14850
14851	/* First try to get it from MAC address mailbox. */
14852	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14853	if ((hi >> 16) == 0x484b) {
14854		dev->dev_addr[0] = (hi >>  8) & 0xff;
14855		dev->dev_addr[1] = (hi >>  0) & 0xff;
14856
14857		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14858		dev->dev_addr[2] = (lo >> 24) & 0xff;
14859		dev->dev_addr[3] = (lo >> 16) & 0xff;
14860		dev->dev_addr[4] = (lo >>  8) & 0xff;
14861		dev->dev_addr[5] = (lo >>  0) & 0xff;
14862
14863		/* Some old bootcode may report a 0 MAC address in SRAM */
14864		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14865	}
14866	if (!addr_ok) {
14867		/* Next, try NVRAM. */
14868		if (!tg3_flag(tp, NO_NVRAM) &&
14869		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14870		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14871			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14872			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14873		}
14874		/* Finally just fetch it out of the MAC control regs. */
14875		else {
14876			hi = tr32(MAC_ADDR_0_HIGH);
14877			lo = tr32(MAC_ADDR_0_LOW);
14878
14879			dev->dev_addr[5] = lo & 0xff;
14880			dev->dev_addr[4] = (lo >> 8) & 0xff;
14881			dev->dev_addr[3] = (lo >> 16) & 0xff;
14882			dev->dev_addr[2] = (lo >> 24) & 0xff;
14883			dev->dev_addr[1] = hi & 0xff;
14884			dev->dev_addr[0] = (hi >> 8) & 0xff;
14885		}
14886	}
14887
14888	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14889#ifdef CONFIG_SPARC
14890		if (!tg3_get_default_macaddr_sparc(tp))
14891			return 0;
14892#endif
14893		return -EINVAL;
14894	}
14895	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14896	return 0;
14897}
14898
14899#define BOUNDARY_SINGLE_CACHELINE	1
14900#define BOUNDARY_MULTI_CACHELINE	2
14901
14902static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14903{
14904	int cacheline_size;
14905	u8 byte;
14906	int goal;
14907
14908	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14909	if (byte == 0)
14910		cacheline_size = 1024;
14911	else
14912		cacheline_size = (int) byte * 4;
14913
14914	/* On 5703 and later chips, the boundary bits have no
14915	 * effect.
14916	 */
14917	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14918	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14919	    !tg3_flag(tp, PCI_EXPRESS))
14920		goto out;
14921
14922#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14923	goal = BOUNDARY_MULTI_CACHELINE;
14924#else
14925#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14926	goal = BOUNDARY_SINGLE_CACHELINE;
14927#else
14928	goal = 0;
14929#endif
14930#endif
14931
14932	if (tg3_flag(tp, 57765_PLUS)) {
14933		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14934		goto out;
14935	}
14936
14937	if (!goal)
14938		goto out;
14939
14940	/* PCI controllers on most RISC systems tend to disconnect
14941	 * when a device tries to burst across a cache-line boundary.
14942	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14943	 *
14944	 * Unfortunately, for PCI-E there are only limited
14945	 * write-side controls for this, and thus for reads
14946	 * we will still get the disconnects.  We'll also waste
14947	 * these PCI cycles for both read and write for chips
14948	 * other than 5700 and 5701 which do not implement the
14949	 * boundary bits.
14950	 */
14951	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14952		switch (cacheline_size) {
14953		case 16:
14954		case 32:
14955		case 64:
14956		case 128:
14957			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14958				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14959					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14960			} else {
14961				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14962					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14963			}
14964			break;
14965
14966		case 256:
14967			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14968				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14969			break;
14970
14971		default:
14972			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14973				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14974			break;
14975		}
14976	} else if (tg3_flag(tp, PCI_EXPRESS)) {
14977		switch (cacheline_size) {
14978		case 16:
14979		case 32:
14980		case 64:
14981			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14982				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14983				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14984				break;
14985			}
14986			/* fallthrough */
14987		case 128:
14988		default:
14989			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14990			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14991			break;
14992		}
14993	} else {
14994		switch (cacheline_size) {
14995		case 16:
14996			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14997				val |= (DMA_RWCTRL_READ_BNDRY_16 |
14998					DMA_RWCTRL_WRITE_BNDRY_16);
14999				break;
15000			}
15001			/* fallthrough */
15002		case 32:
15003			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15004				val |= (DMA_RWCTRL_READ_BNDRY_32 |
15005					DMA_RWCTRL_WRITE_BNDRY_32);
15006				break;
15007			}
15008			/* fallthrough */
15009		case 64:
15010			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15011				val |= (DMA_RWCTRL_READ_BNDRY_64 |
15012					DMA_RWCTRL_WRITE_BNDRY_64);
15013				break;
15014			}
15015			/* fallthrough */
15016		case 128:
15017			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15018				val |= (DMA_RWCTRL_READ_BNDRY_128 |
15019					DMA_RWCTRL_WRITE_BNDRY_128);
15020				break;
15021			}
15022			/* fallthrough */
15023		case 256:
15024			val |= (DMA_RWCTRL_READ_BNDRY_256 |
15025				DMA_RWCTRL_WRITE_BNDRY_256);
15026			break;
15027		case 512:
15028			val |= (DMA_RWCTRL_READ_BNDRY_512 |
15029				DMA_RWCTRL_WRITE_BNDRY_512);
15030			break;
15031		case 1024:
15032		default:
15033			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15034				DMA_RWCTRL_WRITE_BNDRY_1024);
15035			break;
15036		}
15037	}
15038
15039out:
15040	return val;
15041}
15042
15043static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15044{
15045	struct tg3_internal_buffer_desc test_desc;
15046	u32 sram_dma_descs;
15047	int i, ret;
15048
15049	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15050
15051	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15052	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15053	tw32(RDMAC_STATUS, 0);
15054	tw32(WDMAC_STATUS, 0);
15055
15056	tw32(BUFMGR_MODE, 0);
15057	tw32(FTQ_RESET, 0);
15058
15059	test_desc.addr_hi = ((u64) buf_dma) >> 32;
15060	test_desc.addr_lo = buf_dma & 0xffffffff;
15061	test_desc.nic_mbuf = 0x00002100;
15062	test_desc.len = size;
15063
15064	/*
15065	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15066	 * the *second* time the tg3 driver was getting loaded after an
15067	 * initial scan.
15068	 *
15069	 * Broadcom tells me:
15070	 *   ...the DMA engine is connected to the GRC block and a DMA
15071	 *   reset may affect the GRC block in some unpredictable way...
15072	 *   The behavior of resets to individual blocks has not been tested.
15073	 *
15074	 * Broadcom noted the GRC reset will also reset all sub-components.
15075	 */
15076	if (to_device) {
15077		test_desc.cqid_sqid = (13 << 8) | 2;
15078
15079		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15080		udelay(40);
15081	} else {
15082		test_desc.cqid_sqid = (16 << 8) | 7;
15083
15084		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15085		udelay(40);
15086	}
15087	test_desc.flags = 0x00000005;
15088
15089	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15090		u32 val;
15091
15092		val = *(((u32 *)&test_desc) + i);
15093		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15094				       sram_dma_descs + (i * sizeof(u32)));
15095		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15096	}
15097	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15098
15099	if (to_device)
15100		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15101	else
15102		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15103
15104	ret = -ENODEV;
15105	for (i = 0; i < 40; i++) {
15106		u32 val;
15107
15108		if (to_device)
15109			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15110		else
15111			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15112		if ((val & 0xffff) == sram_dma_descs) {
15113			ret = 0;
15114			break;
15115		}
15116
15117		udelay(100);
15118	}
15119
15120	return ret;
15121}
15122
15123#define TEST_BUFFER_SIZE	0x2000
15124
15125static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15126	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15127	{ },
15128};
15129
15130static int __devinit tg3_test_dma(struct tg3 *tp)
15131{
15132	dma_addr_t buf_dma;
15133	u32 *buf, saved_dma_rwctrl;
15134	int ret = 0;
15135
15136	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15137				 &buf_dma, GFP_KERNEL);
15138	if (!buf) {
15139		ret = -ENOMEM;
15140		goto out_nofree;
15141	}
15142
15143	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15144			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15145
15146	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15147
15148	if (tg3_flag(tp, 57765_PLUS))
15149		goto out;
15150
15151	if (tg3_flag(tp, PCI_EXPRESS)) {
15152		/* DMA read watermark not used on PCIE */
15153		tp->dma_rwctrl |= 0x00180000;
15154	} else if (!tg3_flag(tp, PCIX_MODE)) {
15155		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15156		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15157			tp->dma_rwctrl |= 0x003f0000;
15158		else
15159			tp->dma_rwctrl |= 0x003f000f;
15160	} else {
15161		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15162		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15163			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15164			u32 read_water = 0x7;
15165
15166			/* If the 5704 is behind the EPB bridge, we can
15167			 * do the less restrictive ONE_DMA workaround for
15168			 * better performance.
15169			 */
15170			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15171			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15172				tp->dma_rwctrl |= 0x8000;
15173			else if (ccval == 0x6 || ccval == 0x7)
15174				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15175
15176			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15177				read_water = 4;
15178			/* Set bit 23 to enable PCIX hw bug fix */
15179			tp->dma_rwctrl |=
15180				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15181				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15182				(1 << 23);
15183		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15184			/* 5780 always in PCIX mode */
15185			tp->dma_rwctrl |= 0x00144000;
15186		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15187			/* 5714 always in PCIX mode */
15188			tp->dma_rwctrl |= 0x00148000;
15189		} else {
15190			tp->dma_rwctrl |= 0x001b000f;
15191		}
15192	}
15193
15194	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15195	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15196		tp->dma_rwctrl &= 0xfffffff0;
15197
15198	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15199	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15200		/* Remove this if it causes problems for some boards. */
15201		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15202
15203		/* On 5700/5701 chips, we need to set this bit.
15204		 * Otherwise the chip will issue cacheline transactions
15205		 * to streamable DMA memory with not all the byte
15206		 * enables turned on.  This is an error on several
15207		 * RISC PCI controllers, in particular sparc64.
15208		 *
15209		 * On 5703/5704 chips, this bit has been reassigned
15210		 * a different meaning.  In particular, it is used
15211		 * on those chips to enable a PCI-X workaround.
15212		 */
15213		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15214	}
15215
15216	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15217
15218#if 0
15219	/* Unneeded, already done by tg3_get_invariants.  */
15220	tg3_switch_clocks(tp);
15221#endif
15222
15223	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15224	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15225		goto out;
15226
15227	/* It is best to perform DMA test with maximum write burst size
15228	 * to expose the 5700/5701 write DMA bug.
15229	 */
15230	saved_dma_rwctrl = tp->dma_rwctrl;
15231	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15232	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15233
15234	while (1) {
15235		u32 *p = buf, i;
15236
15237		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15238			p[i] = i;
15239
15240		/* Send the buffer to the chip. */
15241		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15242		if (ret) {
15243			dev_err(&tp->pdev->dev,
15244				"%s: Buffer write failed. err = %d\n",
15245				__func__, ret);
15246			break;
15247		}
15248
15249#if 0
15250		/* validate data reached card RAM correctly. */
15251		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15252			u32 val;
15253			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15254			if (le32_to_cpu(val) != p[i]) {
15255				dev_err(&tp->pdev->dev,
15256					"%s: Buffer corrupted on device! "
15257					"(%d != %d)\n", __func__, val, i);
15258				/* ret = -ENODEV here? */
15259			}
15260			p[i] = 0;
15261		}
15262#endif
15263		/* Now read it back. */
15264		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15265		if (ret) {
15266			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15267				"err = %d\n", __func__, ret);
15268			break;
15269		}
15270
15271		/* Verify it. */
15272		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15273			if (p[i] == i)
15274				continue;
15275
15276			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15277			    DMA_RWCTRL_WRITE_BNDRY_16) {
15278				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15279				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15280				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15281				break;
15282			} else {
15283				dev_err(&tp->pdev->dev,
15284					"%s: Buffer corrupted on read back! "
15285					"(%d != %d)\n", __func__, p[i], i);
15286				ret = -ENODEV;
15287				goto out;
15288			}
15289		}
15290
15291		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15292			/* Success. */
15293			ret = 0;
15294			break;
15295		}
15296	}
15297	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15298	    DMA_RWCTRL_WRITE_BNDRY_16) {
15299		/* DMA test passed without adjusting DMA boundary,
15300		 * now look for chipsets that are known to expose the
15301		 * DMA bug without failing the test.
15302		 */
15303		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15304			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15305			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15306		} else {
15307			/* Safe to use the calculated DMA boundary. */
15308			tp->dma_rwctrl = saved_dma_rwctrl;
15309		}
15310
15311		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15312	}
15313
15314out:
15315	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15316out_nofree:
15317	return ret;
15318}
15319
15320static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15321{
15322	if (tg3_flag(tp, 57765_PLUS)) {
15323		tp->bufmgr_config.mbuf_read_dma_low_water =
15324			DEFAULT_MB_RDMA_LOW_WATER_5705;
15325		tp->bufmgr_config.mbuf_mac_rx_low_water =
15326			DEFAULT_MB_MACRX_LOW_WATER_57765;
15327		tp->bufmgr_config.mbuf_high_water =
15328			DEFAULT_MB_HIGH_WATER_57765;
15329
15330		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15331			DEFAULT_MB_RDMA_LOW_WATER_5705;
15332		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15333			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15334		tp->bufmgr_config.mbuf_high_water_jumbo =
15335			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15336	} else if (tg3_flag(tp, 5705_PLUS)) {
15337		tp->bufmgr_config.mbuf_read_dma_low_water =
15338			DEFAULT_MB_RDMA_LOW_WATER_5705;
15339		tp->bufmgr_config.mbuf_mac_rx_low_water =
15340			DEFAULT_MB_MACRX_LOW_WATER_5705;
15341		tp->bufmgr_config.mbuf_high_water =
15342			DEFAULT_MB_HIGH_WATER_5705;
15343		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15344			tp->bufmgr_config.mbuf_mac_rx_low_water =
15345				DEFAULT_MB_MACRX_LOW_WATER_5906;
15346			tp->bufmgr_config.mbuf_high_water =
15347				DEFAULT_MB_HIGH_WATER_5906;
15348		}
15349
15350		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15351			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15352		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15353			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15354		tp->bufmgr_config.mbuf_high_water_jumbo =
15355			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15356	} else {
15357		tp->bufmgr_config.mbuf_read_dma_low_water =
15358			DEFAULT_MB_RDMA_LOW_WATER;
15359		tp->bufmgr_config.mbuf_mac_rx_low_water =
15360			DEFAULT_MB_MACRX_LOW_WATER;
15361		tp->bufmgr_config.mbuf_high_water =
15362			DEFAULT_MB_HIGH_WATER;
15363
15364		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15365			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15366		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15367			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15368		tp->bufmgr_config.mbuf_high_water_jumbo =
15369			DEFAULT_MB_HIGH_WATER_JUMBO;
15370	}
15371
15372	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15373	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15374}
15375
15376static char * __devinit tg3_phy_string(struct tg3 *tp)
15377{
15378	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15379	case TG3_PHY_ID_BCM5400:	return "5400";
15380	case TG3_PHY_ID_BCM5401:	return "5401";
15381	case TG3_PHY_ID_BCM5411:	return "5411";
15382	case TG3_PHY_ID_BCM5701:	return "5701";
15383	case TG3_PHY_ID_BCM5703:	return "5703";
15384	case TG3_PHY_ID_BCM5704:	return "5704";
15385	case TG3_PHY_ID_BCM5705:	return "5705";
15386	case TG3_PHY_ID_BCM5750:	return "5750";
15387	case TG3_PHY_ID_BCM5752:	return "5752";
15388	case TG3_PHY_ID_BCM5714:	return "5714";
15389	case TG3_PHY_ID_BCM5780:	return "5780";
15390	case TG3_PHY_ID_BCM5755:	return "5755";
15391	case TG3_PHY_ID_BCM5787:	return "5787";
15392	case TG3_PHY_ID_BCM5784:	return "5784";
15393	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15394	case TG3_PHY_ID_BCM5906:	return "5906";
15395	case TG3_PHY_ID_BCM5761:	return "5761";
15396	case TG3_PHY_ID_BCM5718C:	return "5718C";
15397	case TG3_PHY_ID_BCM5718S:	return "5718S";
15398	case TG3_PHY_ID_BCM57765:	return "57765";
15399	case TG3_PHY_ID_BCM5719C:	return "5719C";
15400	case TG3_PHY_ID_BCM5720C:	return "5720C";
15401	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15402	case 0:			return "serdes";
15403	default:		return "unknown";
15404	}
15405}
15406
15407static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15408{
15409	if (tg3_flag(tp, PCI_EXPRESS)) {
15410		strcpy(str, "PCI Express");
15411		return str;
15412	} else if (tg3_flag(tp, PCIX_MODE)) {
15413		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15414
15415		strcpy(str, "PCIX:");
15416
15417		if ((clock_ctrl == 7) ||
15418		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15419		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15420			strcat(str, "133MHz");
15421		else if (clock_ctrl == 0)
15422			strcat(str, "33MHz");
15423		else if (clock_ctrl == 2)
15424			strcat(str, "50MHz");
15425		else if (clock_ctrl == 4)
15426			strcat(str, "66MHz");
15427		else if (clock_ctrl == 6)
15428			strcat(str, "100MHz");
15429	} else {
15430		strcpy(str, "PCI:");
15431		if (tg3_flag(tp, PCI_HIGH_SPEED))
15432			strcat(str, "66MHz");
15433		else
15434			strcat(str, "33MHz");
15435	}
15436	if (tg3_flag(tp, PCI_32BIT))
15437		strcat(str, ":32-bit");
15438	else
15439		strcat(str, ":64-bit");
15440	return str;
15441}
15442
15443static void __devinit tg3_init_coal(struct tg3 *tp)
15444{
15445	struct ethtool_coalesce *ec = &tp->coal;
15446
15447	memset(ec, 0, sizeof(*ec));
15448	ec->cmd = ETHTOOL_GCOALESCE;
15449	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15450	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15451	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15452	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15453	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15454	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15455	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15456	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15457	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15458
15459	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15460				 HOSTCC_MODE_CLRTICK_TXBD)) {
15461		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15462		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15463		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15464		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15465	}
15466
15467	if (tg3_flag(tp, 5705_PLUS)) {
15468		ec->rx_coalesce_usecs_irq = 0;
15469		ec->tx_coalesce_usecs_irq = 0;
15470		ec->stats_block_coalesce_usecs = 0;
15471	}
15472}
15473
15474static int __devinit tg3_init_one(struct pci_dev *pdev,
15475				  const struct pci_device_id *ent)
15476{
15477	struct net_device *dev;
15478	struct tg3 *tp;
15479	int i, err, pm_cap;
15480	u32 sndmbx, rcvmbx, intmbx;
15481	char str[40];
15482	u64 dma_mask, persist_dma_mask;
15483	netdev_features_t features = 0;
15484
15485	printk_once(KERN_INFO "%s\n", version);
15486
15487	err = pci_enable_device(pdev);
15488	if (err) {
15489		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15490		return err;
15491	}
15492
15493	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15494	if (err) {
15495		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15496		goto err_out_disable_pdev;
15497	}
15498
15499	pci_set_master(pdev);
15500
15501	/* Find power-management capability. */
15502	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15503	if (pm_cap == 0) {
15504		dev_err(&pdev->dev,
15505			"Cannot find Power Management capability, aborting\n");
15506		err = -EIO;
15507		goto err_out_free_res;
15508	}
15509
15510	err = pci_set_power_state(pdev, PCI_D0);
15511	if (err) {
15512		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15513		goto err_out_free_res;
15514	}
15515
15516	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15517	if (!dev) {
15518		err = -ENOMEM;
15519		goto err_out_power_down;
15520	}
15521
15522	SET_NETDEV_DEV(dev, &pdev->dev);
15523
15524	tp = netdev_priv(dev);
15525	tp->pdev = pdev;
15526	tp->dev = dev;
15527	tp->pm_cap = pm_cap;
15528	tp->rx_mode = TG3_DEF_RX_MODE;
15529	tp->tx_mode = TG3_DEF_TX_MODE;
15530
15531	if (tg3_debug > 0)
15532		tp->msg_enable = tg3_debug;
15533	else
15534		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15535
15536	/* The word/byte swap controls here control register access byte
15537	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15538	 * setting below.
15539	 */
15540	tp->misc_host_ctrl =
15541		MISC_HOST_CTRL_MASK_PCI_INT |
15542		MISC_HOST_CTRL_WORD_SWAP |
15543		MISC_HOST_CTRL_INDIR_ACCESS |
15544		MISC_HOST_CTRL_PCISTATE_RW;
15545
15546	/* The NONFRM (non-frame) byte/word swap controls take effect
15547	 * on descriptor entries, anything which isn't packet data.
15548	 *
15549	 * The StrongARM chips on the board (one for tx, one for rx)
15550	 * are running in big-endian mode.
15551	 */
15552	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15553			GRC_MODE_WSWAP_NONFRM_DATA);
15554#ifdef __BIG_ENDIAN
15555	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15556#endif
15557	spin_lock_init(&tp->lock);
15558	spin_lock_init(&tp->indirect_lock);
15559	INIT_WORK(&tp->reset_task, tg3_reset_task);
15560
15561	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15562	if (!tp->regs) {
15563		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15564		err = -ENOMEM;
15565		goto err_out_free_dev;
15566	}
15567
15568	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15569	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15570	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15571	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15572	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15573	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15574	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15575	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15576		tg3_flag_set(tp, ENABLE_APE);
15577		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15578		if (!tp->aperegs) {
15579			dev_err(&pdev->dev,
15580				"Cannot map APE registers, aborting\n");
15581			err = -ENOMEM;
15582			goto err_out_iounmap;
15583		}
15584	}
15585
15586	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15587	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15588
15589	dev->ethtool_ops = &tg3_ethtool_ops;
15590	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15591	dev->netdev_ops = &tg3_netdev_ops;
15592	dev->irq = pdev->irq;
15593
15594	err = tg3_get_invariants(tp);
15595	if (err) {
15596		dev_err(&pdev->dev,
15597			"Problem fetching invariants of chip, aborting\n");
15598		goto err_out_apeunmap;
15599	}
15600
15601	/* The EPB bridge inside 5714, 5715, and 5780 and any
15602	 * device behind the EPB cannot support DMA addresses > 40-bit.
15603	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15604	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15605	 * do DMA address check in tg3_start_xmit().
15606	 */
15607	if (tg3_flag(tp, IS_5788))
15608		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15609	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15610		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15611#ifdef CONFIG_HIGHMEM
15612		dma_mask = DMA_BIT_MASK(64);
15613#endif
15614	} else
15615		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15616
15617	/* Configure DMA attributes. */
15618	if (dma_mask > DMA_BIT_MASK(32)) {
15619		err = pci_set_dma_mask(pdev, dma_mask);
15620		if (!err) {
15621			features |= NETIF_F_HIGHDMA;
15622			err = pci_set_consistent_dma_mask(pdev,
15623							  persist_dma_mask);
15624			if (err < 0) {
15625				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15626					"DMA for consistent allocations\n");
15627				goto err_out_apeunmap;
15628			}
15629		}
15630	}
15631	if (err || dma_mask == DMA_BIT_MASK(32)) {
15632		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15633		if (err) {
15634			dev_err(&pdev->dev,
15635				"No usable DMA configuration, aborting\n");
15636			goto err_out_apeunmap;
15637		}
15638	}
15639
15640	tg3_init_bufmgr_config(tp);
15641
15642	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15643
15644	/* 5700 B0 chips do not support checksumming correctly due
15645	 * to hardware bugs.
15646	 */
15647	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15648		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15649
15650		if (tg3_flag(tp, 5755_PLUS))
15651			features |= NETIF_F_IPV6_CSUM;
15652	}
15653
15654	/* TSO is on by default on chips that support hardware TSO.
15655	 * Firmware TSO on older chips gives lower performance, so it
15656	 * is off by default, but can be enabled using ethtool.
15657	 */
15658	if ((tg3_flag(tp, HW_TSO_1) ||
15659	     tg3_flag(tp, HW_TSO_2) ||
15660	     tg3_flag(tp, HW_TSO_3)) &&
15661	    (features & NETIF_F_IP_CSUM))
15662		features |= NETIF_F_TSO;
15663	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15664		if (features & NETIF_F_IPV6_CSUM)
15665			features |= NETIF_F_TSO6;
15666		if (tg3_flag(tp, HW_TSO_3) ||
15667		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15668		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15669		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15670		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15671		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15672			features |= NETIF_F_TSO_ECN;
15673	}
15674
15675	dev->features |= features;
15676	dev->vlan_features |= features;
15677
15678	/*
15679	 * Add loopback capability only for a subset of devices that support
15680	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15681	 * loopback for the remaining devices.
15682	 */
15683	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15684	    !tg3_flag(tp, CPMU_PRESENT))
15685		/* Add the loopback capability */
15686		features |= NETIF_F_LOOPBACK;
15687
15688	dev->hw_features |= features;
15689
15690	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15691	    !tg3_flag(tp, TSO_CAPABLE) &&
15692	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15693		tg3_flag_set(tp, MAX_RXPEND_64);
15694		tp->rx_pending = 63;
15695	}
15696
15697	err = tg3_get_device_address(tp);
15698	if (err) {
15699		dev_err(&pdev->dev,
15700			"Could not obtain valid ethernet address, aborting\n");
15701		goto err_out_apeunmap;
15702	}
15703
15704	/*
15705	 * Reset chip in case UNDI or EFI driver did not shutdown
15706	 * DMA self test will enable WDMAC and we'll see (spurious)
15707	 * pending DMA on the PCI bus at that point.
15708	 */
15709	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15710	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15711		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15712		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15713	}
15714
15715	err = tg3_test_dma(tp);
15716	if (err) {
15717		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15718		goto err_out_apeunmap;
15719	}
15720
15721	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15722	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15723	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15724	for (i = 0; i < tp->irq_max; i++) {
15725		struct tg3_napi *tnapi = &tp->napi[i];
15726
15727		tnapi->tp = tp;
15728		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15729
15730		tnapi->int_mbox = intmbx;
15731		if (i <= 4)
15732			intmbx += 0x8;
15733		else
15734			intmbx += 0x4;
15735
15736		tnapi->consmbox = rcvmbx;
15737		tnapi->prodmbox = sndmbx;
15738
15739		if (i)
15740			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15741		else
15742			tnapi->coal_now = HOSTCC_MODE_NOW;
15743
15744		if (!tg3_flag(tp, SUPPORT_MSIX))
15745			break;
15746
15747		/*
15748		 * If we support MSIX, we'll be using RSS.  If we're using
15749		 * RSS, the first vector only handles link interrupts and the
15750		 * remaining vectors handle rx and tx interrupts.  Reuse the
15751		 * mailbox values for the next iteration.  The values we setup
15752		 * above are still useful for the single vectored mode.
15753		 */
15754		if (!i)
15755			continue;
15756
15757		rcvmbx += 0x8;
15758
15759		if (sndmbx & 0x4)
15760			sndmbx -= 0x4;
15761		else
15762			sndmbx += 0xc;
15763	}
15764
15765	tg3_init_coal(tp);
15766
15767	pci_set_drvdata(pdev, dev);
15768
15769	if (tg3_flag(tp, 5717_PLUS)) {
15770		/* Resume a low-power mode */
15771		tg3_frob_aux_power(tp, false);
15772	}
15773
15774	tg3_timer_init(tp);
15775
15776	err = register_netdev(dev);
15777	if (err) {
15778		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15779		goto err_out_apeunmap;
15780	}
15781
15782	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15783		    tp->board_part_number,
15784		    tp->pci_chip_rev_id,
15785		    tg3_bus_string(tp, str),
15786		    dev->dev_addr);
15787
15788	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15789		struct phy_device *phydev;
15790		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15791		netdev_info(dev,
15792			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15793			    phydev->drv->name, dev_name(&phydev->dev));
15794	} else {
15795		char *ethtype;
15796
15797		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15798			ethtype = "10/100Base-TX";
15799		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15800			ethtype = "1000Base-SX";
15801		else
15802			ethtype = "10/100/1000Base-T";
15803
15804		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15805			    "(WireSpeed[%d], EEE[%d])\n",
15806			    tg3_phy_string(tp), ethtype,
15807			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15808			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15809	}
15810
15811	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15812		    (dev->features & NETIF_F_RXCSUM) != 0,
15813		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
15814		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15815		    tg3_flag(tp, ENABLE_ASF) != 0,
15816		    tg3_flag(tp, TSO_CAPABLE) != 0);
15817	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15818		    tp->dma_rwctrl,
15819		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15820		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15821
15822	pci_save_state(pdev);
15823
15824	return 0;
15825
15826err_out_apeunmap:
15827	if (tp->aperegs) {
15828		iounmap(tp->aperegs);
15829		tp->aperegs = NULL;
15830	}
15831
15832err_out_iounmap:
15833	if (tp->regs) {
15834		iounmap(tp->regs);
15835		tp->regs = NULL;
15836	}
15837
15838err_out_free_dev:
15839	free_netdev(dev);
15840
15841err_out_power_down:
15842	pci_set_power_state(pdev, PCI_D3hot);
15843
15844err_out_free_res:
15845	pci_release_regions(pdev);
15846
15847err_out_disable_pdev:
15848	pci_disable_device(pdev);
15849	pci_set_drvdata(pdev, NULL);
15850	return err;
15851}
15852
15853static void __devexit tg3_remove_one(struct pci_dev *pdev)
15854{
15855	struct net_device *dev = pci_get_drvdata(pdev);
15856
15857	if (dev) {
15858		struct tg3 *tp = netdev_priv(dev);
15859
15860		if (tp->fw)
15861			release_firmware(tp->fw);
15862
15863		tg3_reset_task_cancel(tp);
15864
15865		if (tg3_flag(tp, USE_PHYLIB)) {
15866			tg3_phy_fini(tp);
15867			tg3_mdio_fini(tp);
15868		}
15869
15870		unregister_netdev(dev);
15871		if (tp->aperegs) {
15872			iounmap(tp->aperegs);
15873			tp->aperegs = NULL;
15874		}
15875		if (tp->regs) {
15876			iounmap(tp->regs);
15877			tp->regs = NULL;
15878		}
15879		free_netdev(dev);
15880		pci_release_regions(pdev);
15881		pci_disable_device(pdev);
15882		pci_set_drvdata(pdev, NULL);
15883	}
15884}
15885
15886#ifdef CONFIG_PM_SLEEP
15887static int tg3_suspend(struct device *device)
15888{
15889	struct pci_dev *pdev = to_pci_dev(device);
15890	struct net_device *dev = pci_get_drvdata(pdev);
15891	struct tg3 *tp = netdev_priv(dev);
15892	int err;
15893
15894	if (!netif_running(dev))
15895		return 0;
15896
15897	tg3_reset_task_cancel(tp);
15898	tg3_phy_stop(tp);
15899	tg3_netif_stop(tp);
15900
15901	tg3_timer_stop(tp);
15902
15903	tg3_full_lock(tp, 1);
15904	tg3_disable_ints(tp);
15905	tg3_full_unlock(tp);
15906
15907	netif_device_detach(dev);
15908
15909	tg3_full_lock(tp, 0);
15910	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15911	tg3_flag_clear(tp, INIT_COMPLETE);
15912	tg3_full_unlock(tp);
15913
15914	err = tg3_power_down_prepare(tp);
15915	if (err) {
15916		int err2;
15917
15918		tg3_full_lock(tp, 0);
15919
15920		tg3_flag_set(tp, INIT_COMPLETE);
15921		err2 = tg3_restart_hw(tp, 1);
15922		if (err2)
15923			goto out;
15924
15925		tg3_timer_start(tp);
15926
15927		netif_device_attach(dev);
15928		tg3_netif_start(tp);
15929
15930out:
15931		tg3_full_unlock(tp);
15932
15933		if (!err2)
15934			tg3_phy_start(tp);
15935	}
15936
15937	return err;
15938}
15939
15940static int tg3_resume(struct device *device)
15941{
15942	struct pci_dev *pdev = to_pci_dev(device);
15943	struct net_device *dev = pci_get_drvdata(pdev);
15944	struct tg3 *tp = netdev_priv(dev);
15945	int err;
15946
15947	if (!netif_running(dev))
15948		return 0;
15949
15950	netif_device_attach(dev);
15951
15952	tg3_full_lock(tp, 0);
15953
15954	tg3_flag_set(tp, INIT_COMPLETE);
15955	err = tg3_restart_hw(tp, 1);
15956	if (err)
15957		goto out;
15958
15959	tg3_timer_start(tp);
15960
15961	tg3_netif_start(tp);
15962
15963out:
15964	tg3_full_unlock(tp);
15965
15966	if (!err)
15967		tg3_phy_start(tp);
15968
15969	return err;
15970}
15971
15972static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15973#define TG3_PM_OPS (&tg3_pm_ops)
15974
15975#else
15976
15977#define TG3_PM_OPS NULL
15978
15979#endif /* CONFIG_PM_SLEEP */
15980
15981/**
15982 * tg3_io_error_detected - called when PCI error is detected
15983 * @pdev: Pointer to PCI device
15984 * @state: The current pci connection state
15985 *
15986 * This function is called after a PCI bus error affecting
15987 * this device has been detected.
15988 */
15989static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15990					      pci_channel_state_t state)
15991{
15992	struct net_device *netdev = pci_get_drvdata(pdev);
15993	struct tg3 *tp = netdev_priv(netdev);
15994	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15995
15996	netdev_info(netdev, "PCI I/O error detected\n");
15997
15998	rtnl_lock();
15999
16000	if (!netif_running(netdev))
16001		goto done;
16002
16003	tg3_phy_stop(tp);
16004
16005	tg3_netif_stop(tp);
16006
16007	tg3_timer_stop(tp);
16008
16009	/* Want to make sure that the reset task doesn't run */
16010	tg3_reset_task_cancel(tp);
16011
16012	netif_device_detach(netdev);
16013
16014	/* Clean up software state, even if MMIO is blocked */
16015	tg3_full_lock(tp, 0);
16016	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16017	tg3_full_unlock(tp);
16018
16019done:
16020	if (state == pci_channel_io_perm_failure)
16021		err = PCI_ERS_RESULT_DISCONNECT;
16022	else
16023		pci_disable_device(pdev);
16024
16025	rtnl_unlock();
16026
16027	return err;
16028}
16029
16030/**
16031 * tg3_io_slot_reset - called after the pci bus has been reset.
16032 * @pdev: Pointer to PCI device
16033 *
16034 * Restart the card from scratch, as if from a cold-boot.
16035 * At this point, the card has exprienced a hard reset,
16036 * followed by fixups by BIOS, and has its config space
16037 * set up identically to what it was at cold boot.
16038 */
16039static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16040{
16041	struct net_device *netdev = pci_get_drvdata(pdev);
16042	struct tg3 *tp = netdev_priv(netdev);
16043	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16044	int err;
16045
16046	rtnl_lock();
16047
16048	if (pci_enable_device(pdev)) {
16049		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16050		goto done;
16051	}
16052
16053	pci_set_master(pdev);
16054	pci_restore_state(pdev);
16055	pci_save_state(pdev);
16056
16057	if (!netif_running(netdev)) {
16058		rc = PCI_ERS_RESULT_RECOVERED;
16059		goto done;
16060	}
16061
16062	err = tg3_power_up(tp);
16063	if (err)
16064		goto done;
16065
16066	rc = PCI_ERS_RESULT_RECOVERED;
16067
16068done:
16069	rtnl_unlock();
16070
16071	return rc;
16072}
16073
16074/**
16075 * tg3_io_resume - called when traffic can start flowing again.
16076 * @pdev: Pointer to PCI device
16077 *
16078 * This callback is called when the error recovery driver tells
16079 * us that its OK to resume normal operation.
16080 */
16081static void tg3_io_resume(struct pci_dev *pdev)
16082{
16083	struct net_device *netdev = pci_get_drvdata(pdev);
16084	struct tg3 *tp = netdev_priv(netdev);
16085	int err;
16086
16087	rtnl_lock();
16088
16089	if (!netif_running(netdev))
16090		goto done;
16091
16092	tg3_full_lock(tp, 0);
16093	tg3_flag_set(tp, INIT_COMPLETE);
16094	err = tg3_restart_hw(tp, 1);
16095	tg3_full_unlock(tp);
16096	if (err) {
16097		netdev_err(netdev, "Cannot restart hardware after reset.\n");
16098		goto done;
16099	}
16100
16101	netif_device_attach(netdev);
16102
16103	tg3_timer_start(tp);
16104
16105	tg3_netif_start(tp);
16106
16107	tg3_phy_start(tp);
16108
16109done:
16110	rtnl_unlock();
16111}
16112
16113static struct pci_error_handlers tg3_err_handler = {
16114	.error_detected	= tg3_io_error_detected,
16115	.slot_reset	= tg3_io_slot_reset,
16116	.resume		= tg3_io_resume
16117};
16118
16119static struct pci_driver tg3_driver = {
16120	.name		= DRV_MODULE_NAME,
16121	.id_table	= tg3_pci_tbl,
16122	.probe		= tg3_init_one,
16123	.remove		= __devexit_p(tg3_remove_one),
16124	.err_handler	= &tg3_err_handler,
16125	.driver.pm	= TG3_PM_OPS,
16126};
16127
16128static int __init tg3_init(void)
16129{
16130	return pci_register_driver(&tg3_driver);
16131}
16132
16133static void __exit tg3_cleanup(void)
16134{
16135	pci_unregister_driver(&tg3_driver);
16136}
16137
16138module_init(tg3_init);
16139module_exit(tg3_cleanup);
16140