igb_ethtool.c revision 63d4a8f963fce8fe5d8ba3d5eba34d7d7ca6f82b
1/*******************************************************************************
2
3  Intel(R) Gigabit Ethernet Linux driver
4  Copyright(c) 2007-2012 Intel Corporation.
5
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  more details.
14
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for igb */
29
30#include <linux/vmalloc.h>
31#include <linux/netdevice.h>
32#include <linux/pci.h>
33#include <linux/delay.h>
34#include <linux/interrupt.h>
35#include <linux/if_ether.h>
36#include <linux/ethtool.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/pm_runtime.h>
40#include <linux/highmem.h>
41
42#include "igb.h"
43
/* One row of an ethtool statistics table: the name shown by "ethtool -S"
 * plus the size and offset of the counter field backing it.
 */
struct igb_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to userspace */
	int sizeof_stat;			/* size of the counter field, in bytes */
	int stat_offset;			/* offset of the field within its struct */
};
49
/* Build an igb_stats entry describing a field of struct igb_adapter */
#define IGB_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
	.stat_offset = offsetof(struct igb_adapter, _stat) \
}
/* adapter-level counters reported by "ethtool -S" */
static const struct igb_stats igb_gstrings_stats[] = {
	IGB_STAT("rx_packets", stats.gprc),
	IGB_STAT("tx_packets", stats.gptc),
	IGB_STAT("rx_bytes", stats.gorc),
	IGB_STAT("tx_bytes", stats.gotc),
	IGB_STAT("rx_broadcast", stats.bprc),
	IGB_STAT("tx_broadcast", stats.bptc),
	IGB_STAT("rx_multicast", stats.mprc),
	IGB_STAT("tx_multicast", stats.mptc),
	IGB_STAT("multicast", stats.mprc),
	IGB_STAT("collisions", stats.colc),
	IGB_STAT("rx_crc_errors", stats.crcerrs),
	IGB_STAT("rx_no_buffer_count", stats.rnbc),
	IGB_STAT("rx_missed_errors", stats.mpc),
	IGB_STAT("tx_aborted_errors", stats.ecol),
	IGB_STAT("tx_carrier_errors", stats.tncrs),
	IGB_STAT("tx_window_errors", stats.latecol),
	IGB_STAT("tx_abort_late_coll", stats.latecol),
	IGB_STAT("tx_deferred_ok", stats.dc),
	IGB_STAT("tx_single_coll_ok", stats.scc),
	IGB_STAT("tx_multi_coll_ok", stats.mcc),
	IGB_STAT("tx_timeout_count", tx_timeout_count),
	IGB_STAT("rx_long_length_errors", stats.roc),
	IGB_STAT("rx_short_length_errors", stats.ruc),
	IGB_STAT("rx_align_errors", stats.algnerrc),
	IGB_STAT("tx_tcp_seg_good", stats.tsctc),
	IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
	IGB_STAT("rx_flow_control_xon", stats.xonrxc),
	IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
	IGB_STAT("tx_flow_control_xon", stats.xontxc),
	IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
	IGB_STAT("rx_long_byte_count", stats.gorc),
	IGB_STAT("tx_dma_out_of_sync", stats.doosync),
	IGB_STAT("tx_smbus", stats.mgptc),
	IGB_STAT("rx_smbus", stats.mgprc),
	IGB_STAT("dropped_smbus", stats.mgpdc),
	IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
	IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
};
96
/* Build an igb_stats entry describing a field of struct rtnl_link_stats64 */
#define IGB_NETDEV_STAT(_net_stat) { \
	.stat_string = __stringify(_net_stat), \
	.sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
	.stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
}
/* generic netdev counters reported alongside the adapter-level stats */
static const struct igb_stats igb_gstrings_net_stats[] = {
	IGB_NETDEV_STAT(rx_errors),
	IGB_NETDEV_STAT(tx_errors),
	IGB_NETDEV_STAT(tx_dropped),
	IGB_NETDEV_STAT(rx_length_errors),
	IGB_NETDEV_STAT(rx_over_errors),
	IGB_NETDEV_STAT(rx_frame_errors),
	IGB_NETDEV_STAT(rx_fifo_errors),
	IGB_NETDEV_STAT(tx_fifo_errors),
	IGB_NETDEV_STAT(tx_heartbeat_errors)
};
113
114#define IGB_GLOBAL_STATS_LEN	\
115	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
116#define IGB_NETDEV_STATS_LEN	\
117	(sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
118#define IGB_RX_QUEUE_STATS_LEN \
119	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
120
121#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
122
123#define IGB_QUEUE_STATS_LEN \
124	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
125	  IGB_RX_QUEUE_STATS_LEN) + \
126	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
127	  IGB_TX_QUEUE_STATS_LEN))
128#define IGB_STATS_LEN \
129	(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
130
131static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
132	"Register test  (offline)", "Eeprom test    (offline)",
133	"Interrupt test (offline)", "Loopback test  (offline)",
134	"Link test   (on/offline)"
135};
136#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
137
138static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
139{
140	struct igb_adapter *adapter = netdev_priv(netdev);
141	struct e1000_hw *hw = &adapter->hw;
142	u32 status;
143
144	if (hw->phy.media_type == e1000_media_type_copper) {
145
146		ecmd->supported = (SUPPORTED_10baseT_Half |
147				   SUPPORTED_10baseT_Full |
148				   SUPPORTED_100baseT_Half |
149				   SUPPORTED_100baseT_Full |
150				   SUPPORTED_1000baseT_Full|
151				   SUPPORTED_Autoneg |
152				   SUPPORTED_TP |
153				   SUPPORTED_Pause);
154		ecmd->advertising = ADVERTISED_TP;
155
156		if (hw->mac.autoneg == 1) {
157			ecmd->advertising |= ADVERTISED_Autoneg;
158			/* the e1000 autoneg seems to match ethtool nicely */
159			ecmd->advertising |= hw->phy.autoneg_advertised;
160		}
161
162		if (hw->mac.autoneg != 1)
163			ecmd->advertising &= ~(ADVERTISED_Pause |
164					       ADVERTISED_Asym_Pause);
165
166		if (hw->fc.requested_mode == e1000_fc_full)
167			ecmd->advertising |= ADVERTISED_Pause;
168		else if (hw->fc.requested_mode == e1000_fc_rx_pause)
169			ecmd->advertising |= (ADVERTISED_Pause |
170					      ADVERTISED_Asym_Pause);
171		else if (hw->fc.requested_mode == e1000_fc_tx_pause)
172			ecmd->advertising |=  ADVERTISED_Asym_Pause;
173		else
174			ecmd->advertising &= ~(ADVERTISED_Pause |
175					       ADVERTISED_Asym_Pause);
176
177		ecmd->port = PORT_TP;
178		ecmd->phy_address = hw->phy.addr;
179	} else {
180		ecmd->supported   = (SUPPORTED_1000baseT_Full |
181				     SUPPORTED_FIBRE |
182				     SUPPORTED_Autoneg);
183
184		ecmd->advertising = (ADVERTISED_1000baseT_Full |
185				     ADVERTISED_FIBRE |
186				     ADVERTISED_Autoneg |
187				     ADVERTISED_Pause);
188
189		ecmd->port = PORT_FIBRE;
190	}
191
192	ecmd->transceiver = XCVR_INTERNAL;
193
194	status = rd32(E1000_STATUS);
195
196	if (status & E1000_STATUS_LU) {
197
198		if ((status & E1000_STATUS_SPEED_1000) ||
199		    hw->phy.media_type != e1000_media_type_copper)
200			ethtool_cmd_speed_set(ecmd, SPEED_1000);
201		else if (status & E1000_STATUS_SPEED_100)
202			ethtool_cmd_speed_set(ecmd, SPEED_100);
203		else
204			ethtool_cmd_speed_set(ecmd, SPEED_10);
205
206		if ((status & E1000_STATUS_FD) ||
207		    hw->phy.media_type != e1000_media_type_copper)
208			ecmd->duplex = DUPLEX_FULL;
209		else
210			ecmd->duplex = DUPLEX_HALF;
211	} else {
212		ethtool_cmd_speed_set(ecmd, -1);
213		ecmd->duplex = -1;
214	}
215
216	ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
217
218	/* MDI-X => 2; MDI =>1; Invalid =>0 */
219	if (hw->phy.media_type == e1000_media_type_copper)
220		ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
221						      ETH_TP_MDI;
222	else
223		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
224
225	if (hw->phy.mdix == AUTO_ALL_MODES)
226		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
227	else
228		ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
229
230	return 0;
231}
232
233static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
234{
235	struct igb_adapter *adapter = netdev_priv(netdev);
236	struct e1000_hw *hw = &adapter->hw;
237
238	/* When SoL/IDER sessions are active, autoneg/speed/duplex
239	 * cannot be changed */
240	if (igb_check_reset_block(hw)) {
241		dev_err(&adapter->pdev->dev,
242			"Cannot change link characteristics when SoL/IDER is active.\n");
243		return -EINVAL;
244	}
245
246	/*
247	 * MDI setting is only allowed when autoneg enabled because
248	 * some hardware doesn't allow MDI setting when speed or
249	 * duplex is forced.
250	 */
251	if (ecmd->eth_tp_mdix_ctrl) {
252		if (hw->phy.media_type != e1000_media_type_copper)
253			return -EOPNOTSUPP;
254
255		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
256		    (ecmd->autoneg != AUTONEG_ENABLE)) {
257			dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
258			return -EINVAL;
259		}
260	}
261
262	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
263		msleep(1);
264
265	if (ecmd->autoneg == AUTONEG_ENABLE) {
266		hw->mac.autoneg = 1;
267		hw->phy.autoneg_advertised = ecmd->advertising |
268					     ADVERTISED_TP |
269					     ADVERTISED_Autoneg;
270		ecmd->advertising = hw->phy.autoneg_advertised;
271		if (adapter->fc_autoneg)
272			hw->fc.requested_mode = e1000_fc_default;
273	} else {
274		u32 speed = ethtool_cmd_speed(ecmd);
275		/* calling this overrides forced MDI setting */
276		if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
277			clear_bit(__IGB_RESETTING, &adapter->state);
278			return -EINVAL;
279		}
280	}
281
282	/* MDI-X => 2; MDI => 1; Auto => 3 */
283	if (ecmd->eth_tp_mdix_ctrl) {
284		/*
285		 * fix up the value for auto (3 => 0) as zero is mapped
286		 * internally to auto
287		 */
288		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
289			hw->phy.mdix = AUTO_ALL_MODES;
290		else
291			hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
292	}
293
294	/* reset the link */
295	if (netif_running(adapter->netdev)) {
296		igb_down(adapter);
297		igb_up(adapter);
298	} else
299		igb_reset(adapter);
300
301	clear_bit(__IGB_RESETTING, &adapter->state);
302	return 0;
303}
304
305static u32 igb_get_link(struct net_device *netdev)
306{
307	struct igb_adapter *adapter = netdev_priv(netdev);
308	struct e1000_mac_info *mac = &adapter->hw.mac;
309
310	/*
311	 * If the link is not reported up to netdev, interrupts are disabled,
312	 * and so the physical link state may have changed since we last
313	 * looked. Set get_link_status to make sure that the true link
314	 * state is interrogated, rather than pulling a cached and possibly
315	 * stale link state from the driver.
316	 */
317	if (!netif_carrier_ok(netdev))
318		mac->get_link_status = 1;
319
320	return igb_has_link(adapter);
321}
322
323static void igb_get_pauseparam(struct net_device *netdev,
324			       struct ethtool_pauseparam *pause)
325{
326	struct igb_adapter *adapter = netdev_priv(netdev);
327	struct e1000_hw *hw = &adapter->hw;
328
329	pause->autoneg =
330		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
331
332	if (hw->fc.current_mode == e1000_fc_rx_pause)
333		pause->rx_pause = 1;
334	else if (hw->fc.current_mode == e1000_fc_tx_pause)
335		pause->tx_pause = 1;
336	else if (hw->fc.current_mode == e1000_fc_full) {
337		pause->rx_pause = 1;
338		pause->tx_pause = 1;
339	}
340}
341
342static int igb_set_pauseparam(struct net_device *netdev,
343			      struct ethtool_pauseparam *pause)
344{
345	struct igb_adapter *adapter = netdev_priv(netdev);
346	struct e1000_hw *hw = &adapter->hw;
347	int retval = 0;
348
349	adapter->fc_autoneg = pause->autoneg;
350
351	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
352		msleep(1);
353
354	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
355		hw->fc.requested_mode = e1000_fc_default;
356		if (netif_running(adapter->netdev)) {
357			igb_down(adapter);
358			igb_up(adapter);
359		} else {
360			igb_reset(adapter);
361		}
362	} else {
363		if (pause->rx_pause && pause->tx_pause)
364			hw->fc.requested_mode = e1000_fc_full;
365		else if (pause->rx_pause && !pause->tx_pause)
366			hw->fc.requested_mode = e1000_fc_rx_pause;
367		else if (!pause->rx_pause && pause->tx_pause)
368			hw->fc.requested_mode = e1000_fc_tx_pause;
369		else if (!pause->rx_pause && !pause->tx_pause)
370			hw->fc.requested_mode = e1000_fc_none;
371
372		hw->fc.current_mode = hw->fc.requested_mode;
373
374		retval = ((hw->phy.media_type == e1000_media_type_copper) ?
375			  igb_force_mac_fc(hw) : igb_setup_link(hw));
376	}
377
378	clear_bit(__IGB_RESETTING, &adapter->state);
379	return retval;
380}
381
382static u32 igb_get_msglevel(struct net_device *netdev)
383{
384	struct igb_adapter *adapter = netdev_priv(netdev);
385	return adapter->msg_enable;
386}
387
388static void igb_set_msglevel(struct net_device *netdev, u32 data)
389{
390	struct igb_adapter *adapter = netdev_priv(netdev);
391	adapter->msg_enable = data;
392}
393
394static int igb_get_regs_len(struct net_device *netdev)
395{
396#define IGB_REGS_LEN 739
397	return IGB_REGS_LEN * sizeof(u32);
398}
399
/* ethtool get_regs: dump device registers (and some software-kept stats)
 * into a fixed-layout u32 buffer of IGB_REGS_LEN words.  The index of
 * each entry is ABI for the ethtool register parser, so slots must never
 * be renumbered.  Some indices (85, 87, 97, 99, 115, 117) are skipped —
 * presumably reserved for the high dwords of the adjacent 64-bit
 * counters (gorc/gotc/tor/tot/hgorc/hgotc); NOTE(review): confirm
 * against the ethtool dump parser before relying on that.
 */
static void igb_get_regs(struct net_device *netdev,
			 struct ethtool_regs *regs, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	/* unwritten slots must read back as zero */
	memset(p, 0, IGB_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = rd32(E1000_CTRL);
	regs_buff[1] = rd32(E1000_STATUS);
	regs_buff[2] = rd32(E1000_CTRL_EXT);
	regs_buff[3] = rd32(E1000_MDIC);
	regs_buff[4] = rd32(E1000_SCTL);
	regs_buff[5] = rd32(E1000_CONNSW);
	regs_buff[6] = rd32(E1000_VET);
	regs_buff[7] = rd32(E1000_LEDCTL);
	regs_buff[8] = rd32(E1000_PBA);
	regs_buff[9] = rd32(E1000_PBS);
	regs_buff[10] = rd32(E1000_FRTIMER);
	regs_buff[11] = rd32(E1000_TCPTIMER);

	/* NVM Register */
	regs_buff[12] = rd32(E1000_EECD);

	/* Interrupt */
	/* Reading EICS for EICR because they read the
	 * same but EICS does not clear on read */
	regs_buff[13] = rd32(E1000_EICS);
	regs_buff[14] = rd32(E1000_EICS);
	regs_buff[15] = rd32(E1000_EIMS);
	regs_buff[16] = rd32(E1000_EIMC);
	regs_buff[17] = rd32(E1000_EIAC);
	regs_buff[18] = rd32(E1000_EIAM);
	/* Reading ICS for ICR because they read the
	 * same but ICS does not clear on read */
	regs_buff[19] = rd32(E1000_ICS);
	regs_buff[20] = rd32(E1000_ICS);
	regs_buff[21] = rd32(E1000_IMS);
	regs_buff[22] = rd32(E1000_IMC);
	regs_buff[23] = rd32(E1000_IAC);
	regs_buff[24] = rd32(E1000_IAM);
	regs_buff[25] = rd32(E1000_IMIRVP);

	/* Flow Control */
	regs_buff[26] = rd32(E1000_FCAL);
	regs_buff[27] = rd32(E1000_FCAH);
	regs_buff[28] = rd32(E1000_FCTTV);
	regs_buff[29] = rd32(E1000_FCRTL);
	regs_buff[30] = rd32(E1000_FCRTH);
	regs_buff[31] = rd32(E1000_FCRTV);

	/* Receive */
	regs_buff[32] = rd32(E1000_RCTL);
	regs_buff[33] = rd32(E1000_RXCSUM);
	regs_buff[34] = rd32(E1000_RLPML);
	regs_buff[35] = rd32(E1000_RFCTL);
	regs_buff[36] = rd32(E1000_MRQC);
	regs_buff[37] = rd32(E1000_VT_CTL);

	/* Transmit */
	regs_buff[38] = rd32(E1000_TCTL);
	regs_buff[39] = rd32(E1000_TCTL_EXT);
	regs_buff[40] = rd32(E1000_TIPG);
	regs_buff[41] = rd32(E1000_DTXCTL);

	/* Wake Up */
	regs_buff[42] = rd32(E1000_WUC);
	regs_buff[43] = rd32(E1000_WUFC);
	regs_buff[44] = rd32(E1000_WUS);
	regs_buff[45] = rd32(E1000_IPAV);
	regs_buff[46] = rd32(E1000_WUPL);

	/* MAC */
	regs_buff[47] = rd32(E1000_PCS_CFG0);
	regs_buff[48] = rd32(E1000_PCS_LCTL);
	regs_buff[49] = rd32(E1000_PCS_LSTAT);
	regs_buff[50] = rd32(E1000_PCS_ANADV);
	regs_buff[51] = rd32(E1000_PCS_LPAB);
	regs_buff[52] = rd32(E1000_PCS_NPTX);
	regs_buff[53] = rd32(E1000_PCS_LPABNP);

	/* Statistics: these come from the software-maintained copies, not
	 * fresh register reads (the hardware counters clear on read)
	 */
	regs_buff[54] = adapter->stats.crcerrs;
	regs_buff[55] = adapter->stats.algnerrc;
	regs_buff[56] = adapter->stats.symerrs;
	regs_buff[57] = adapter->stats.rxerrc;
	regs_buff[58] = adapter->stats.mpc;
	regs_buff[59] = adapter->stats.scc;
	regs_buff[60] = adapter->stats.ecol;
	regs_buff[61] = adapter->stats.mcc;
	regs_buff[62] = adapter->stats.latecol;
	regs_buff[63] = adapter->stats.colc;
	regs_buff[64] = adapter->stats.dc;
	regs_buff[65] = adapter->stats.tncrs;
	regs_buff[66] = adapter->stats.sec;
	regs_buff[67] = adapter->stats.htdpmc;
	regs_buff[68] = adapter->stats.rlec;
	regs_buff[69] = adapter->stats.xonrxc;
	regs_buff[70] = adapter->stats.xontxc;
	regs_buff[71] = adapter->stats.xoffrxc;
	regs_buff[72] = adapter->stats.xofftxc;
	regs_buff[73] = adapter->stats.fcruc;
	regs_buff[74] = adapter->stats.prc64;
	regs_buff[75] = adapter->stats.prc127;
	regs_buff[76] = adapter->stats.prc255;
	regs_buff[77] = adapter->stats.prc511;
	regs_buff[78] = adapter->stats.prc1023;
	regs_buff[79] = adapter->stats.prc1522;
	regs_buff[80] = adapter->stats.gprc;
	regs_buff[81] = adapter->stats.bprc;
	regs_buff[82] = adapter->stats.mprc;
	regs_buff[83] = adapter->stats.gptc;
	regs_buff[84] = adapter->stats.gorc;
	regs_buff[86] = adapter->stats.gotc;
	regs_buff[88] = adapter->stats.rnbc;
	regs_buff[89] = adapter->stats.ruc;
	regs_buff[90] = adapter->stats.rfc;
	regs_buff[91] = adapter->stats.roc;
	regs_buff[92] = adapter->stats.rjc;
	regs_buff[93] = adapter->stats.mgprc;
	regs_buff[94] = adapter->stats.mgpdc;
	regs_buff[95] = adapter->stats.mgptc;
	regs_buff[96] = adapter->stats.tor;
	regs_buff[98] = adapter->stats.tot;
	regs_buff[100] = adapter->stats.tpr;
	regs_buff[101] = adapter->stats.tpt;
	regs_buff[102] = adapter->stats.ptc64;
	regs_buff[103] = adapter->stats.ptc127;
	regs_buff[104] = adapter->stats.ptc255;
	regs_buff[105] = adapter->stats.ptc511;
	regs_buff[106] = adapter->stats.ptc1023;
	regs_buff[107] = adapter->stats.ptc1522;
	regs_buff[108] = adapter->stats.mptc;
	regs_buff[109] = adapter->stats.bptc;
	regs_buff[110] = adapter->stats.tsctc;
	regs_buff[111] = adapter->stats.iac;
	regs_buff[112] = adapter->stats.rpthc;
	regs_buff[113] = adapter->stats.hgptc;
	regs_buff[114] = adapter->stats.hgorc;
	regs_buff[116] = adapter->stats.hgotc;
	regs_buff[118] = adapter->stats.lenerrs;
	regs_buff[119] = adapter->stats.scvpc;
	regs_buff[120] = adapter->stats.hrmpc;

	/* per-queue rx registers (first four queues) */
	for (i = 0; i < 4; i++)
		regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
	for (i = 0; i < 4; i++)
		regs_buff[129 + i] = rd32(E1000_RDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[133 + i] = rd32(E1000_RDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[137 + i] = rd32(E1000_RDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[141 + i] = rd32(E1000_RDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[145 + i] = rd32(E1000_RDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[149 + i] = rd32(E1000_RXDCTL(i));

	/* interrupt throttling and filtering */
	for (i = 0; i < 10; i++)
		regs_buff[153 + i] = rd32(E1000_EITR(i));
	for (i = 0; i < 8; i++)
		regs_buff[163 + i] = rd32(E1000_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
	for (i = 0; i < 16; i++)
		regs_buff[179 + i] = rd32(E1000_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[195 + i] = rd32(E1000_RAH(i));

	/* per-queue tx registers (first four queues) */
	for (i = 0; i < 4; i++)
		regs_buff[211 + i] = rd32(E1000_TDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[215 + i] = rd32(E1000_TDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[219 + i] = rd32(E1000_TDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[223 + i] = rd32(E1000_TDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[227 + i] = rd32(E1000_TDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));

	/* wake-up filter tables */
	for (i = 0; i < 4; i++)
		regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
	for (i = 0; i < 32; i++)
		regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));

	regs_buff[547] = rd32(E1000_TDFH);
	regs_buff[548] = rd32(E1000_TDFT);
	regs_buff[549] = rd32(E1000_TDFHS);
	regs_buff[550] = rd32(E1000_TDFPC);

	/* OS-to-BMC counters exist only on parts newer than 82580 */
	if (hw->mac.type > e1000_82580) {
		regs_buff[551] = adapter->stats.o2bgptc;
		regs_buff[552] = adapter->stats.b2ospc;
		regs_buff[553] = adapter->stats.o2bspc;
		regs_buff[554] = adapter->stats.b2ogprc;
	}

	/* the remainder covers queues 4-15, which only the 82576 has */
	if (hw->mac.type != e1000_82576)
		return;
	for (i = 0; i < 12; i++)
		regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
	for (i = 0; i < 4; i++)
		regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));

	for (i = 0; i < 12; i++)
		regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
	for (i = 0; i < 12; i++)
		regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
}
657
658static int igb_get_eeprom_len(struct net_device *netdev)
659{
660	struct igb_adapter *adapter = netdev_priv(netdev);
661	return adapter->hw.nvm.word_size * 2;
662}
663
664static int igb_get_eeprom(struct net_device *netdev,
665			  struct ethtool_eeprom *eeprom, u8 *bytes)
666{
667	struct igb_adapter *adapter = netdev_priv(netdev);
668	struct e1000_hw *hw = &adapter->hw;
669	u16 *eeprom_buff;
670	int first_word, last_word;
671	int ret_val = 0;
672	u16 i;
673
674	if (eeprom->len == 0)
675		return -EINVAL;
676
677	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
678
679	first_word = eeprom->offset >> 1;
680	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
681
682	eeprom_buff = kmalloc(sizeof(u16) *
683			(last_word - first_word + 1), GFP_KERNEL);
684	if (!eeprom_buff)
685		return -ENOMEM;
686
687	if (hw->nvm.type == e1000_nvm_eeprom_spi)
688		ret_val = hw->nvm.ops.read(hw, first_word,
689					    last_word - first_word + 1,
690					    eeprom_buff);
691	else {
692		for (i = 0; i < last_word - first_word + 1; i++) {
693			ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
694						    &eeprom_buff[i]);
695			if (ret_val)
696				break;
697		}
698	}
699
700	/* Device's eeprom is always little-endian, word addressable */
701	for (i = 0; i < last_word - first_word + 1; i++)
702		le16_to_cpus(&eeprom_buff[i]);
703
704	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
705			eeprom->len);
706	kfree(eeprom_buff);
707
708	return ret_val;
709}
710
711static int igb_set_eeprom(struct net_device *netdev,
712			  struct ethtool_eeprom *eeprom, u8 *bytes)
713{
714	struct igb_adapter *adapter = netdev_priv(netdev);
715	struct e1000_hw *hw = &adapter->hw;
716	u16 *eeprom_buff;
717	void *ptr;
718	int max_len, first_word, last_word, ret_val = 0;
719	u16 i;
720
721	if (eeprom->len == 0)
722		return -EOPNOTSUPP;
723
724	if (hw->mac.type == e1000_i211)
725		return -EOPNOTSUPP;
726
727	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
728		return -EFAULT;
729
730	max_len = hw->nvm.word_size * 2;
731
732	first_word = eeprom->offset >> 1;
733	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
734	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
735	if (!eeprom_buff)
736		return -ENOMEM;
737
738	ptr = (void *)eeprom_buff;
739
740	if (eeprom->offset & 1) {
741		/* need read/modify/write of first changed EEPROM word */
742		/* only the second byte of the word is being modified */
743		ret_val = hw->nvm.ops.read(hw, first_word, 1,
744					    &eeprom_buff[0]);
745		ptr++;
746	}
747	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
748		/* need read/modify/write of last changed EEPROM word */
749		/* only the first byte of the word is being modified */
750		ret_val = hw->nvm.ops.read(hw, last_word, 1,
751				   &eeprom_buff[last_word - first_word]);
752	}
753
754	/* Device's eeprom is always little-endian, word addressable */
755	for (i = 0; i < last_word - first_word + 1; i++)
756		le16_to_cpus(&eeprom_buff[i]);
757
758	memcpy(ptr, bytes, eeprom->len);
759
760	for (i = 0; i < last_word - first_word + 1; i++)
761		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
762
763	ret_val = hw->nvm.ops.write(hw, first_word,
764				     last_word - first_word + 1, eeprom_buff);
765
766	/* Update the checksum over the first part of the EEPROM if needed
767	 * and flush shadow RAM for 82573 controllers */
768	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
769		hw->nvm.ops.update(hw);
770
771	igb_set_fw_version(adapter);
772	kfree(eeprom_buff);
773	return ret_val;
774}
775
776static void igb_get_drvinfo(struct net_device *netdev,
777			    struct ethtool_drvinfo *drvinfo)
778{
779	struct igb_adapter *adapter = netdev_priv(netdev);
780
781	strlcpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver));
782	strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
783
784	/*
785	 * EEPROM image version # is reported as firmware version # for
786	 * 82575 controllers
787	 */
788	strlcpy(drvinfo->fw_version, adapter->fw_version,
789		sizeof(drvinfo->fw_version));
790	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
791		sizeof(drvinfo->bus_info));
792	drvinfo->n_stats = IGB_STATS_LEN;
793	drvinfo->testinfo_len = IGB_TEST_LEN;
794	drvinfo->regdump_len = igb_get_regs_len(netdev);
795	drvinfo->eedump_len = igb_get_eeprom_len(netdev);
796}
797
798static void igb_get_ringparam(struct net_device *netdev,
799			      struct ethtool_ringparam *ring)
800{
801	struct igb_adapter *adapter = netdev_priv(netdev);
802
803	ring->rx_max_pending = IGB_MAX_RXD;
804	ring->tx_max_pending = IGB_MAX_TXD;
805	ring->rx_pending = adapter->rx_ring_count;
806	ring->tx_pending = adapter->tx_ring_count;
807}
808
/* ethtool set_ringparam: resize the Tx/Rx descriptor rings.
 *
 * New descriptor resources are allocated into a temporary ring array
 * first, and only swapped into the live rings after every allocation has
 * succeeded.  The statement order here is load-bearing (see the comment
 * in the body): the MSI-X ISRs hold pointers to the ring structs, so the
 * structs themselves are updated in place via memcpy rather than being
 * reallocated.
 */
static int igb_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *temp_ring;
	int i, err = 0;
	u16 new_rx_count, new_tx_count;

	/* mini/jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* clamp the requested counts to hardware limits and round to the
	 * required descriptor multiple
	 */
	new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
	new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
	new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	/* serialize with any reset already in flight */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	if (!netif_running(adapter->netdev)) {
		/* interface is down: no resources are allocated yet, so
		 * just record the new counts
		 */
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* one scratch array big enough for whichever direction has more
	 * queues; it is reused for tx first, then rx
	 */
	if (adapter->num_tx_queues > adapter->num_rx_queues)
		temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
	else
		temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igb_down(adapter);

	/*
	 * We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the tx and rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct igb_ring));

			temp_ring[i].count = new_tx_count;
			err = igb_setup_tx_resources(&temp_ring[i]);
			if (err) {
				/* unwind the temp allocations made so far */
				while (i) {
					i--;
					igb_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* all new resources allocated; swap them into the live rings */
		for (i = 0; i < adapter->num_tx_queues; i++) {
			igb_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct igb_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct igb_ring));

			temp_ring[i].count = new_rx_count;
			err = igb_setup_rx_resources(&temp_ring[i]);
			if (err) {
				/* unwind the temp allocations made so far */
				while (i) {
					i--;
					igb_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}

		}

		/* all new resources allocated; swap them into the live rings */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			igb_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct igb_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}
err_setup:
	/* bring the interface back up whether or not the resize succeeded */
	igb_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGB_RESETTING, &adapter->state);
	return err;
}
923
/* ethtool register test data: one table row per register (or register
 * array) exercised by the register self-test.
 */
struct igb_reg_test {
	u16 reg;	/* base register offset */
	u16 reg_offset;	/* byte spacing between successive array elements */
	u16 array_len;	/* number of elements to test (1 for a single reg) */
	u16 test_type;	/* one of the *_TEST types defined below */
	u32 mask;	/* bits the test considers significant */
	u32 write;	/* value/pattern written during the test */
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x100 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

/* test_type values consumed by the register self-test code (defined
 * elsewhere in this file); names describe the intended access pattern
 */
#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
950
/* i210 reg test: { reg, element spacing, count, test type, mask, write } */
static struct igb_reg_test reg_test_i210[] = {
	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* RDH is read-only for i210, only test RDT. */
	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
						0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA,	   0, 16, TABLE64_TEST_HI,
						0x900FFFFF, 0xFFFFFFFF },
	{ E1000_MTA,	   0, 128, TABLE32_TEST,
						0xFFFFFFFF, 0xFFFFFFFF },
	/* terminator entry */
	{ 0, 0, 0, 0, 0 }
};
980
981/* i350 reg test */
982static struct igb_reg_test reg_test_i350[] = {
983	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
984	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
985	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
986	{ E1000_VET,	   0x100, 1,  PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
987	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
988	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
989	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
990	{ E1000_RDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
991	{ E1000_RDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
992	{ E1000_RDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
993	/* RDH is read-only for i350, only test RDT. */
994	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
995	{ E1000_RDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
996	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
997	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
998	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
999	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1000	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1001	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1002	{ E1000_TDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1003	{ E1000_TDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1004	{ E1000_TDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1005	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1006	{ E1000_TDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1007	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1008	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
1009	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
1010	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1011	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
1012						0xFFFFFFFF, 0xFFFFFFFF },
1013	{ E1000_RA,	   0, 16, TABLE64_TEST_HI,
1014						0xC3FFFFFF, 0xFFFFFFFF },
1015	{ E1000_RA2,	   0, 16, TABLE64_TEST_LO,
1016						0xFFFFFFFF, 0xFFFFFFFF },
1017	{ E1000_RA2,	   0, 16, TABLE64_TEST_HI,
1018						0xC3FFFFFF, 0xFFFFFFFF },
1019	{ E1000_MTA,	   0, 128, TABLE32_TEST,
1020						0xFFFFFFFF, 0xFFFFFFFF },
1021	{ 0, 0, 0, 0 }
1022};
1023
1024/* 82580 reg test */
1025static struct igb_reg_test reg_test_82580[] = {
1026	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1027	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
1028	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
1029	{ E1000_VET,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1030	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1031	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1032	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1033	{ E1000_RDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1034	{ E1000_RDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1035	{ E1000_RDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1036	/* RDH is read-only for 82580, only test RDT. */
1037	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1038	{ E1000_RDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1039	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
1040	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1041	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
1042	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1043	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1044	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1045	{ E1000_TDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1046	{ E1000_TDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1047	{ E1000_TDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1048	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1049	{ E1000_TDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1050	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1051	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
1052	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
1053	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1054	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
1055						0xFFFFFFFF, 0xFFFFFFFF },
1056	{ E1000_RA,	   0, 16, TABLE64_TEST_HI,
1057						0x83FFFFFF, 0xFFFFFFFF },
1058	{ E1000_RA2,	   0, 8, TABLE64_TEST_LO,
1059						0xFFFFFFFF, 0xFFFFFFFF },
1060	{ E1000_RA2,	   0, 8, TABLE64_TEST_HI,
1061						0x83FFFFFF, 0xFFFFFFFF },
1062	{ E1000_MTA,	   0, 128, TABLE32_TEST,
1063						0xFFFFFFFF, 0xFFFFFFFF },
1064	{ 0, 0, 0, 0 }
1065};
1066
1067/* 82576 reg test */
1068static struct igb_reg_test reg_test_82576[] = {
1069	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1070	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
1071	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
1072	{ E1000_VET,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1073	{ E1000_RDBAL(0),  0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1074	{ E1000_RDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1075	{ E1000_RDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1076	{ E1000_RDBAL(4),  0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1077	{ E1000_RDBAH(4),  0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1078	{ E1000_RDLEN(4),  0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1079	/* Enable all RX queues before testing. */
1080	{ E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
1081	{ E1000_RXDCTL(4), 0x40, 12,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
1082	/* RDH is read-only for 82576, only test RDT. */
1083	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1084	{ E1000_RDT(4),	   0x40, 12,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1085	{ E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, 0 },
1086	{ E1000_RXDCTL(4), 0x40, 12,  WRITE_NO_TEST, 0, 0 },
1087	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
1088	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1089	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
1090	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1091	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1092	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1093	{ E1000_TDBAL(4),  0x40, 12,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1094	{ E1000_TDBAH(4),  0x40, 12,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1095	{ E1000_TDLEN(4),  0x40, 12,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
1096	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1097	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
1098	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
1099	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
1100	{ E1000_RA,	   0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1101	{ E1000_RA,	   0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
1102	{ E1000_RA2,	   0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1103	{ E1000_RA2,	   0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
1104	{ E1000_MTA,	   0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1105	{ 0, 0, 0, 0 }
1106};
1107
/* 82575 register test
 * Entries are { reg, reg_offset, array_len, test_type, mask, write };
 * a zeroed entry terminates the table.
 */
static struct igb_reg_test reg_test_82575[] = {
	{ E1000_FCAL,      0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH,      0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT,       0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_VET,       0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0),  0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
	/* RDH is read-only for 82575, only test RDT. */
	{ E1000_RDT(0),    0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
	{ E1000_FCRTH,     0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV,     0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG,      0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0),  0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_RCTL,      0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL,      0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
	{ E1000_RCTL,      0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
	{ E1000_TCTL,      0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_TXCW,      0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
	{ E1000_RA,        0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA,        0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
	{ E1000_MTA,       0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }	/* terminator (reg == 0 ends the table) */
};
1138
1139static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1140			     int reg, u32 mask, u32 write)
1141{
1142	struct e1000_hw *hw = &adapter->hw;
1143	u32 pat, val;
1144	static const u32 _test[] =
1145		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1146	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
1147		wr32(reg, (_test[pat] & write));
1148		val = rd32(reg) & mask;
1149		if (val != (_test[pat] & write & mask)) {
1150			dev_err(&adapter->pdev->dev,
1151				"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1152				reg, val, (_test[pat] & write & mask));
1153			*data = reg;
1154			return 1;
1155		}
1156	}
1157
1158	return 0;
1159}
1160
1161static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
1162			      int reg, u32 mask, u32 write)
1163{
1164	struct e1000_hw *hw = &adapter->hw;
1165	u32 val;
1166	wr32(reg, write & mask);
1167	val = rd32(reg);
1168	if ((write & mask) != (val & mask)) {
1169		dev_err(&adapter->pdev->dev,
1170			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
1171			(val & mask), (write & mask));
1172		*data = reg;
1173		return 1;
1174	}
1175
1176	return 0;
1177}
1178
/* Run reg_pattern_test()/reg_set_and_check() on behalf of igb_reg_test();
 * the helpers record the failing register in *data, so on failure we only
 * need to propagate the non-zero return to igb_reg_test()'s caller.
 */
#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)
1190
/* igb_reg_test - MAC register read/write self-test (ethtool offline test)
 * @adapter: board private structure
 * @data: set to the offset of the first failing register (or 1 for the
 *	  STATUS toggle test); 0 if all registers pass
 *
 * Returns 1 at the first failure, 0 on success.
 */
static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	/* Select the register table and the STATUS toggle mask for this MAC */
	switch (adapter->hw.mac.type) {
	case e1000_i350:
		test = reg_test_i350;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_i210:
	case e1000_i211:
		test = reg_test_i210;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82580:
		test = reg_test_82580;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82576:
		test = reg_test_82576;
		toggle = 0x7FFFF3FF;
		break;
	default:
		test = reg_test_82575;
		toggle = 0x7FFFF3FF;
		break;
	}

	/* Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writable on newer MACs.
	 */
	before = rd32(E1000_STATUS);
	value = (rd32(E1000_STATUS) & toggle);
	wr32(E1000_STATUS, toggle);
	after = rd32(E1000_STATUS) & toggle;
	if (value != after) {
		dev_err(&adapter->pdev->dev,
			"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
			after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	wr32(E1000_STATUS, before);

	/* Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg +
						(i * test->reg_offset),
						test->mask,
						test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg +
						(i * test->reg_offset),
						test->mask,
						test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				    (adapter->hw.hw_addr + test->reg)
					+ (i * test->reg_offset));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						test->mask,
						test->write);
				break;
			case TABLE64_TEST_HI:
				/* high dword lives 4 bytes above the low one */
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						test->mask,
						test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
1287
1288static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1289{
1290	*data = 0;
1291
1292	/* Validate eeprom on all parts but i211 */
1293	if (adapter->hw.mac.type != e1000_i211) {
1294		if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
1295			*data = 2;
1296	}
1297
1298	return *data;
1299}
1300
1301static irqreturn_t igb_test_intr(int irq, void *data)
1302{
1303	struct igb_adapter *adapter = (struct igb_adapter *) data;
1304	struct e1000_hw *hw = &adapter->hw;
1305
1306	adapter->test_icr |= rd32(E1000_ICR);
1307
1308	return IRQ_HANDLED;
1309}
1310
/* igb_intr_test - interrupt self-test (ethtool offline test)
 * @adapter: board private structure
 * @data: 0 on success; 1 if the IRQ could not be hooked; 3 if a masked
 *	  cause was reported; 4 if an unmasked cause was not reported;
 *	  5 if forcing the other causes leaked into this one
 *
 * Installs igb_test_intr() temporarily, then for each writable ICS bit
 * forces the interrupt with the mask set various ways and checks that
 * only unmasked causes are reported.
 *
 * Returns *data (non-zero on failure), or -1 if the IRQ hook failed.
 */
static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 mask, ics_mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		if (request_irq(adapter->msix_entries[0].vector,
		                igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		shared_int = false;
		if (request_irq(irq,
		                igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
				netdev->name, adapter)) {
		shared_int = false;
	} else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
		 netdev->name, adapter)) {
		*data = 1;
		return -1;
	}
	dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
		(shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	wr32(E1000_IMC, ~0);
	wrfl();
	msleep(10);

	/* Define all writable bits for ICS */
	switch (hw->mac.type) {
	case e1000_82575:
		ics_mask = 0x37F47EDD;
		break;
	case e1000_82576:
		ics_mask = 0x77D4FBFD;
		break;
	case e1000_82580:
		ics_mask = 0x77DCFED5;
		break;
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		ics_mask = 0x77DCFED5;
		break;
	default:
		ics_mask = 0x7FFFFFFF;
		break;
	}

	/* Test each interrupt */
	for (; i < 31; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!(mask & ics_mask))
			continue;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			wr32(E1000_ICR, ~0);

			wr32(E1000_IMC, mask);
			wr32(E1000_ICS, mask);
			wrfl();
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;

		/* Flush any pending interrupts */
		wr32(E1000_ICR, ~0);

		wr32(E1000_IMS, mask);
		wr32(E1000_ICS, mask);
		wrfl();
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			wr32(E1000_ICR, ~0);

			wr32(E1000_IMC, ~mask);
			wr32(E1000_ICS, ~mask);
			wrfl();
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	wr32(E1000_IMC, ~0);
	wrfl();
	msleep(10);

	/* Unhook test interrupt handler */
	if (adapter->msix_entries)
		free_irq(adapter->msix_entries[0].vector, adapter);
	else
		free_irq(irq, adapter);

	return *data;
}
1460
/* igb_free_desc_rings - release the Tx/Rx rings used by the loopback test */
static void igb_free_desc_rings(struct igb_adapter *adapter)
{
	igb_free_tx_resources(&adapter->test_tx_ring);
	igb_free_rx_resources(&adapter->test_rx_ring);
}
1466
/* igb_setup_desc_rings - allocate and configure the loopback test rings
 * @adapter: board private structure
 *
 * Sets up adapter->test_tx_ring / test_rx_ring with the default descriptor
 * counts, points both at the PF's queue, and pre-fills the Rx ring with
 * receive buffers.
 *
 * Returns 0 on success, 1 (Tx) or 3 (Rx) on allocation failure; on
 * failure anything already allocated is freed again.
 */
static int igb_setup_desc_rings(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IGB_DEFAULT_TXD;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	/* PF queues follow the VF queues, hence the vfs_allocated_count index */
	tx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_tx_resources(tx_ring)) {
		ret_val = 1;
		goto err_nomem;
	}

	igb_setup_tctl(adapter);
	igb_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx descriptor ring and Rx buffers */
	rx_ring->count = IGB_DEFAULT_RXD;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_rx_resources(rx_ring)) {
		ret_val = 3;
		goto err_nomem;
	}

	/* set the default queue to queue 0 of PF */
	wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);

	/* enable receive ring */
	igb_setup_rctl(adapter);
	igb_configure_rx_ring(adapter, rx_ring);

	igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));

	return 0;

err_nomem:
	igb_free_desc_rings(adapter);
	return ret_val;
}
1514
/* igb_phy_disable_receiver - keep the PHY receiver quiet during loopback
 * @adapter: board private structure
 *
 * Write out to PHY registers 29 and 30 to disable the Receiver.
 * NOTE(review): registers 29/30 appear to be vendor-specific debug
 * registers and the values a magic sequence — confirm against the M88
 * PHY datasheet before changing.
 */
static void igb_phy_disable_receiver(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	igb_write_phy_reg(hw, 29, 0x001F);
	igb_write_phy_reg(hw, 30, 0x8FFC);
	igb_write_phy_reg(hw, 29, 0x001A);
	igb_write_phy_reg(hw, 30, 0x8FF0);
}
1525
/* igb_integrated_phy_loopback - put the internal PHY into loopback mode
 * @adapter: board private structure
 *
 * Disables autoneg, forces the PHY into 1000 Mb/s loopback, then forces
 * the MAC to matching speed/duplex with link up.  Always returns 0.
 */
static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;

	hw->mac.autoneg = false;

	if (hw->phy.type == e1000_phy_m88) {
		if (hw->phy.id != I210_I_PHY_ID) {
			/* Auto-MDI/MDIX Off */
			igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
			/* reset to update Auto-MDI/MDIX */
			igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
			/* autoneg off */
			igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
		} else {
			/* force 1000, set loopback  */
			igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
			igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
		}
	}

	/* add small delay to avoid loopback test failure */
	msleep(50);

	/* force 1000, set loopback */
	igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
		     E1000_CTRL_FD |	 /* Force Duplex to FULL */
		     E1000_CTRL_SLU);	 /* Set link up enable bit */

	if (hw->phy.type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */

	wr32(E1000_CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (hw->phy.type == e1000_phy_m88)
		igb_phy_disable_receiver(adapter);

	/* allow forced settings to take effect before the test starts */
	mdelay(500);
	return 0;
}
1577
/* igb_set_phy_loopback - PHY loopback entry point; only the integrated
 * PHY variant is implemented.
 */
static int igb_set_phy_loopback(struct igb_adapter *adapter)
{
	return igb_integrated_phy_loopback(adapter);
}
1582
/* igb_setup_loopback_test - place the link in loopback before the test
 * @adapter: board private structure
 *
 * For SerDes/SGMII links (detected via CTRL_EXT) enables MAC transceiver
 * loopback with forced 1000/full PCS settings, including an MPHY tweak
 * needed on DH89xxCC devices; for copper links falls back to PHY
 * loopback.
 *
 * Returns 0 for the SerDes path, otherwise igb_set_phy_loopback()'s
 * result.
 */
static int igb_setup_loopback_test(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	reg = rd32(E1000_CTRL_EXT);

	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
		if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
		(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
		(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
		(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {

			/* Enable DH89xxCC MPHY for near end loopback */
			reg = rd32(E1000_MPHY_ADDR_CTL);
			reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
			E1000_MPHY_PCS_CLK_REG_OFFSET;
			wr32(E1000_MPHY_ADDR_CTL, reg);

			reg = rd32(E1000_MPHY_DATA);
			reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
			wr32(E1000_MPHY_DATA, reg);
		}

		reg = rd32(E1000_RCTL);
		reg |= E1000_RCTL_LBM_TCVR;
		wr32(E1000_RCTL, reg);

		wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);

		reg = rd32(E1000_CTRL);
		reg &= ~(E1000_CTRL_RFCE |
			 E1000_CTRL_TFCE |
			 E1000_CTRL_LRST);
		reg |= E1000_CTRL_SLU |
		       E1000_CTRL_FD;
		wr32(E1000_CTRL, reg);

		/* Unset switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg &= ~E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);

		/* Set PCS register for forced speed */
		reg = rd32(E1000_PCS_LCTL);
		reg &= ~E1000_PCS_LCTL_AN_ENABLE;     /* Disable Autoneg*/
		reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
		       E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
		       E1000_PCS_LCTL_FSD |           /* Force Speed */
		       E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
		wr32(E1000_PCS_LCTL, reg);

		return 0;
	}

	return igb_set_phy_loopback(adapter);
}
1642
/* igb_loopback_cleanup - undo igb_setup_loopback_test()
 * @adapter: board private structure
 *
 * Reverts the DH89xxCC MPHY change, clears the RCTL loopback bits,
 * re-enables autoneg, and takes the PHY out of loopback (with a soft
 * reset) if it had been put there.
 */
static void igb_loopback_cleanup(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
	(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
	(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
	(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
		u32 reg;

		/* Disable near end loopback on DH89xxCC */
		reg = rd32(E1000_MPHY_ADDR_CTL);
		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
		E1000_MPHY_PCS_CLK_REG_OFFSET;
		wr32(E1000_MPHY_ADDR_CTL, reg);

		reg = rd32(E1000_MPHY_DATA);
		reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
		wr32(E1000_MPHY_DATA, reg);
	}

	rctl = rd32(E1000_RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	wr32(E1000_RCTL, rctl);

	hw->mac.autoneg = true;
	igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
	if (phy_reg & MII_CR_LOOPBACK) {
		phy_reg &= ~MII_CR_LOOPBACK;
		igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
		/* soft reset so the cleared loopback bit takes effect */
		igb_phy_sw_reset(hw);
	}
}
1678
1679static void igb_create_lbtest_frame(struct sk_buff *skb,
1680				    unsigned int frame_size)
1681{
1682	memset(skb->data, 0xFF, frame_size);
1683	frame_size /= 2;
1684	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1685	memset(&skb->data[frame_size + 10], 0xBE, 1);
1686	memset(&skb->data[frame_size + 12], 0xAF, 1);
1687}
1688
1689static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
1690				  unsigned int frame_size)
1691{
1692	unsigned char *data;
1693	bool match = true;
1694
1695	frame_size >>= 1;
1696
1697	data = kmap(rx_buffer->page);
1698
1699	if (data[3] != 0xFF ||
1700	    data[frame_size + 10] != 0xBE ||
1701	    data[frame_size + 12] != 0xAF)
1702		match = false;
1703
1704	kunmap(rx_buffer->page);
1705
1706	return match;
1707}
1708
/* igb_clean_test_rings - reap completed loopback frames
 * @rx_ring: test receive ring
 * @tx_ring: test transmit ring
 * @size: expected frame size, used to validate received contents
 *
 * Walks the Rx ring while descriptors report done (DD), verifies each
 * received buffer against the loopback test pattern, frees the matching
 * Tx buffer, and finally replenishes the Rx ring.
 *
 * Returns the number of frames whose contents matched.
 */
static int igb_clean_test_rings(struct igb_ring *rx_ring,
                                struct igb_ring *tx_ring,
                                unsigned int size)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *rx_buffer_info;
	struct igb_tx_buffer *tx_buffer_info;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		/* check rx buffer */
		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer_info->dma,
					IGB_RX_BUFSZ,
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (igb_check_lbtest_frame(rx_buffer_info, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer_info->dma,
					   IGB_RX_BUFSZ,
					   DMA_FROM_DEVICE);

		/* unmap buffer on tx side */
		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);

		/* increment rx/tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
	}

	/* reset BQL accounting for the test queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	igb_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
1768
/* igb_run_loopback_test - transmit/receive frames through the loopback
 * @adapter: board private structure
 *
 * Sends bursts of 64 copies of a patterned test frame, waits 200 ms per
 * burst and verifies all 64 come back intact.  The number of bursts is
 * chosen so the larger of the two test rings wraps a couple of times.
 *
 * Returns 0 on success, 11 (skb alloc failure), 12 (transmit failure) or
 * 13 (receive loss/mismatch).
 */
static int igb_run_loopback_test(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	u16 i, j, lc, good_cnt;
	int ret_val = 0;
	unsigned int size = IGB_RX_HDR_LEN;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	igb_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) { /* loop count loop */
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			/* take a reference per transmit; the ring keeps it */
			skb_get(skb);
			tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from tx to rx */
		msleep(200);

		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	} /* end loop count loop */

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}
1831
/* igb_loopback_test - ethtool loopback self-test wrapper
 * @adapter: board private structure
 * @data: 0 on success (or when the test is skipped), otherwise the
 *	  failure code of whichever stage failed
 *
 * Skips the test (reporting success) when an SoL/IDER session blocks PHY
 * access; otherwise sets up the test rings and loopback mode, runs the
 * frame test and tears everything down again.
 *
 * Returns *data.
 */
static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
	/* PHY loopback cannot be performed if SoL/IDER
	 * sessions are active */
	if (igb_check_reset_block(&adapter->hw)) {
		dev_err(&adapter->pdev->dev,
			"Cannot do PHY loopback test when SoL/IDER is active.\n");
		*data = 0;
		goto out;
	}
	*data = igb_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = igb_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = igb_run_loopback_test(adapter);
	igb_loopback_cleanup(adapter);

err_loopback:
	igb_free_desc_rings(adapter);
out:
	return *data;
}
1856
/* igb_link_test - ethtool link self-test
 * @adapter: board private structure
 * @data: 0 if link is up, 1 otherwise
 *
 * For SerDes links, polls check_for_link up to 3750 times at 20 ms
 * intervals; for other media, waits 4 s for autoneg and then checks the
 * STATUS link-up bit.
 *
 * Returns *data.
 */
static int igb_link_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	*data = 0;
	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
		int i = 0;
		hw->mac.serdes_has_link = false;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes */
		do {
			hw->mac.ops.check_for_link(&adapter->hw);
			if (hw->mac.serdes_has_link)
				return *data;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		hw->mac.ops.check_for_link(&adapter->hw);
		if (hw->mac.autoneg)
			msleep(4000);

		if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}
1885
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Offline mode (ETH_TEST_FL_OFFLINE): runs the link, register, EEPROM,
 * interrupt and loopback tests, resetting the adapter between tests and
 * restoring the saved speed/duplex/autoneg settings afterwards.  The
 * interface is closed for the duration if it was running.
 *
 * Online mode: only the link test runs; the other four result slots are
 * reported as pass (0).
 *
 * Per-test results go to data[0..4]; any failure also sets
 * ETH_TEST_FL_FAILED in eth_test->flags.
 */
static void igb_diag_test(struct net_device *netdev,
			  struct ethtool_test *eth_test, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u16 autoneg_advertised;
	u8 forced_speed_duplex, autoneg;
	bool if_running = netif_running(netdev);

	set_bit(__IGB_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
		autoneg = adapter->hw.mac.autoneg;

		dev_info(&adapter->pdev->dev, "offline testing starting\n");

		/* power up link for link test */
		igb_power_up_link(adapter);

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (igb_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			igb_reset(adapter);

		if (igb_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* reset between sub-tests so one failure cannot skew the next */
		igb_reset(adapter);
		if (igb_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		if (igb_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		/* power up link for loopback test */
		igb_power_up_link(adapter);
		if (igb_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.mac.autoneg = autoneg;

		/* force this routine to wait until autoneg complete/timeout */
		adapter->hw.phy.autoneg_wait_to_complete = true;
		igb_reset(adapter);
		adapter->hw.phy.autoneg_wait_to_complete = false;

		clear_bit(__IGB_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		dev_info(&adapter->pdev->dev, "online testing starting\n");

		/* PHY is powered down when interface is down */
		if (if_running && igb_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
		else
			data[4] = 0;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IGB_TESTING, &adapter->state);
	}
	/* presumably gives the link time to come back up before returning
	 * control to ethtool — TODO confirm the 4s value is required */
	msleep_interruptible(4 * 1000);
}
1968
1969static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1970{
1971	struct igb_adapter *adapter = netdev_priv(netdev);
1972
1973	wol->supported = WAKE_UCAST | WAKE_MCAST |
1974	                 WAKE_BCAST | WAKE_MAGIC |
1975	                 WAKE_PHY;
1976	wol->wolopts = 0;
1977
1978	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
1979		return;
1980
1981	/* apply any specific unsupported masks here */
1982	switch (adapter->hw.device_id) {
1983	default:
1984		break;
1985	}
1986
1987	if (adapter->wol & E1000_WUFC_EX)
1988		wol->wolopts |= WAKE_UCAST;
1989	if (adapter->wol & E1000_WUFC_MC)
1990		wol->wolopts |= WAKE_MCAST;
1991	if (adapter->wol & E1000_WUFC_BC)
1992		wol->wolopts |= WAKE_BCAST;
1993	if (adapter->wol & E1000_WUFC_MAG)
1994		wol->wolopts |= WAKE_MAGIC;
1995	if (adapter->wol & E1000_WUFC_LNKC)
1996		wol->wolopts |= WAKE_PHY;
1997}
1998
1999static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2000{
2001	struct igb_adapter *adapter = netdev_priv(netdev);
2002
2003	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
2004		return -EOPNOTSUPP;
2005
2006	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
2007		return wol->wolopts ? -EOPNOTSUPP : 0;
2008
2009	/* these settings will always override what we currently have */
2010	adapter->wol = 0;
2011
2012	if (wol->wolopts & WAKE_UCAST)
2013		adapter->wol |= E1000_WUFC_EX;
2014	if (wol->wolopts & WAKE_MCAST)
2015		adapter->wol |= E1000_WUFC_MC;
2016	if (wol->wolopts & WAKE_BCAST)
2017		adapter->wol |= E1000_WUFC_BC;
2018	if (wol->wolopts & WAKE_MAGIC)
2019		adapter->wol |= E1000_WUFC_MAG;
2020	if (wol->wolopts & WAKE_PHY)
2021		adapter->wol |= E1000_WUFC_LNKC;
2022	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2023
2024	return 0;
2025}
2026
2027/* bit defines for adapter->led_status */
2028#define IGB_LED_ON		0
2029
/* ethtool LED identify (ethtool -p) callback.
 *
 * NOTE(review): on ETHTOOL_ID_ACTIVE this returns 2 rather than 0,
 * which per ethtool_ops::set_phys_id asks the core to drive the
 * ON/OFF states at that rate — confirm against the ethtool core of
 * this kernel version.
 */
static int igb_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		igb_blink_led(hw);
		return 2;
	case ETHTOOL_ID_ON:
		igb_blink_led(hw);
		break;
	case ETHTOOL_ID_OFF:
		igb_led_off(hw);
		break;
	case ETHTOOL_ID_INACTIVE:
		/* identify finished: LED off and restore default behavior */
		igb_led_off(hw);
		clear_bit(IGB_LED_ON, &adapter->led_status);
		igb_cleanup_led(hw);
		break;
	}

	return 0;
}
2055
/* Set interrupt coalescing (ethtool -C).
 *
 * The *_itr_setting fields use a packed encoding: values 1-3 select
 * special/dynamic ITR modes, while larger values store microseconds
 * shifted left by 2 (the hardware ITR granularity appears to be
 * 0.25us units — see the <<2 / >>2 pairing with igb_get_coalesce).
 * A requested value of 2 and the range (3, IGB_MIN_ITR_USECS) are
 * therefore rejected as unrepresentable.
 */
static int igb_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int i;

	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 3) &&
	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->tx_coalesce_usecs > 3) &&
	     (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->tx_coalesce_usecs == 2))
		return -EINVAL;

	/* with paired queues Tx shares the Rx vector, so a separate
	 * Tx setting cannot be honored */
	if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
		return -EINVAL;

	/* If ITR is disabled, disable DMAC */
	if (ec->rx_coalesce_usecs == 0) {
		if (adapter->flags & IGB_FLAG_DMAC)
			adapter->flags &= ~IGB_FLAG_DMAC;
	}

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;

	/* convert to rate of irq's per second */
	if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
		adapter->tx_itr_setting = adapter->rx_itr_setting;
	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;

	/* push the new values to every queue vector; set_itr flags the
	 * vector so the ITR register is rewritten on the next interrupt */
	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->tx.work_limit = adapter->tx_work_limit;
		if (q_vector->rx.ring)
			q_vector->itr_val = adapter->rx_itr_setting;
		else
			q_vector->itr_val = adapter->tx_itr_setting;
		if (q_vector->itr_val && q_vector->itr_val <= 3)
			q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
	}

	return 0;
}
2111
2112static int igb_get_coalesce(struct net_device *netdev,
2113			    struct ethtool_coalesce *ec)
2114{
2115	struct igb_adapter *adapter = netdev_priv(netdev);
2116
2117	if (adapter->rx_itr_setting <= 3)
2118		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2119	else
2120		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2121
2122	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
2123		if (adapter->tx_itr_setting <= 3)
2124			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2125		else
2126			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2127	}
2128
2129	return 0;
2130}
2131
/* Restart autonegotiation (ethtool -r) by reinitializing the device. */
static int igb_nway_reset(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	igb_reinit_locked(adapter);
	return 0;
}
2139
2140static int igb_get_sset_count(struct net_device *netdev, int sset)
2141{
2142	switch (sset) {
2143	case ETH_SS_STATS:
2144		return IGB_STATS_LEN;
2145	case ETH_SS_TEST:
2146		return IGB_TEST_LEN;
2147	default:
2148		return -ENOTSUPP;
2149	}
2150}
2151
/* Fill the ethtool statistics array (ethtool -S).
 *
 * Layout matches igb_get_strings(): global adapter stats, then netdev
 * stats, then per-Tx-queue and per-Rx-queue counters.  Per-queue 64-bit
 * counters are read under u64_stats begin/retry loops so a concurrent
 * writer on another CPU cannot be observed mid-update.
 */
static void igb_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	unsigned int start;
	struct igb_ring *ring;
	int i, j;
	char *p;

	/* serialize against other readers/updaters of adapter->stats64 */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, net_stats);

	/* global stats: each entry is a u64 or u32 field located by the
	 * offset table built with IGB_STAT() */
	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
		p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
		data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < adapter->num_tx_queues; j++) {
		u64	restart2;

		ring = adapter->tx_ring[j];
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
			data[i]   = ring->tx_stats.packets;
			data[i+1] = ring->tx_stats.bytes;
			data[i+2] = ring->tx_stats.restart_queue;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		/* restart_queue2 lives under a second sync point; fold it
		 * into the same "restart" slot as restart_queue */
		do {
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
			restart2  = ring->tx_stats.restart_queue2;
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
		data[i+2] += restart2;

		i += IGB_TX_QUEUE_STATS_LEN;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		do {
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
			data[i]   = ring->rx_stats.packets;
			data[i+1] = ring->rx_stats.bytes;
			data[i+2] = ring->rx_stats.drops;
			data[i+3] = ring->rx_stats.csum_err;
			data[i+4] = ring->rx_stats.alloc_failed;
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		i += IGB_RX_QUEUE_STATS_LEN;
	}
	spin_unlock(&adapter->stats64_lock);
}
2207
2208static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2209{
2210	struct igb_adapter *adapter = netdev_priv(netdev);
2211	u8 *p = data;
2212	int i;
2213
2214	switch (stringset) {
2215	case ETH_SS_TEST:
2216		memcpy(data, *igb_gstrings_test,
2217			IGB_TEST_LEN*ETH_GSTRING_LEN);
2218		break;
2219	case ETH_SS_STATS:
2220		for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2221			memcpy(p, igb_gstrings_stats[i].stat_string,
2222			       ETH_GSTRING_LEN);
2223			p += ETH_GSTRING_LEN;
2224		}
2225		for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
2226			memcpy(p, igb_gstrings_net_stats[i].stat_string,
2227			       ETH_GSTRING_LEN);
2228			p += ETH_GSTRING_LEN;
2229		}
2230		for (i = 0; i < adapter->num_tx_queues; i++) {
2231			sprintf(p, "tx_queue_%u_packets", i);
2232			p += ETH_GSTRING_LEN;
2233			sprintf(p, "tx_queue_%u_bytes", i);
2234			p += ETH_GSTRING_LEN;
2235			sprintf(p, "tx_queue_%u_restart", i);
2236			p += ETH_GSTRING_LEN;
2237		}
2238		for (i = 0; i < adapter->num_rx_queues; i++) {
2239			sprintf(p, "rx_queue_%u_packets", i);
2240			p += ETH_GSTRING_LEN;
2241			sprintf(p, "rx_queue_%u_bytes", i);
2242			p += ETH_GSTRING_LEN;
2243			sprintf(p, "rx_queue_%u_drops", i);
2244			p += ETH_GSTRING_LEN;
2245			sprintf(p, "rx_queue_%u_csum_err", i);
2246			p += ETH_GSTRING_LEN;
2247			sprintf(p, "rx_queue_%u_alloc_failed", i);
2248			p += ETH_GSTRING_LEN;
2249		}
2250/*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2251		break;
2252	}
2253}
2254
/* Report hardware timestamping capabilities (ethtool -T / SIOCSHWTSTAMP
 * consumers).  Only 82576 and newer MACs listed below have PTP hardware
 * support; older parts get -EOPNOTSUPP.
 */
static int igb_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct igb_adapter *adapter = netdev_priv(dev);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		/* expose the PHC index only if a clock was registered */
		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;

		/* 82576 does not support timestamping all packets. */
		if (adapter->hw.mac.type >= e1000_82580)
			info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
		else
			/* 82576: only specific PTP event filters */
			info->rx_filters |=
				(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2300
/* Report which header fields feed the RSS hash for a flow type
 * (ETHTOOL_GRXFH).  The switch relies on deliberate fallthrough:
 * TCP adds the L4 port bits and falls into UDP, which conditionally
 * adds them, and both fall into the IP-only cases that add the
 * src/dst address bits common to every flow type.
 */
static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
				 struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on igb */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V6_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2338
2339static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2340			   u32 *rule_locs)
2341{
2342	struct igb_adapter *adapter = netdev_priv(dev);
2343	int ret = -EOPNOTSUPP;
2344
2345	switch (cmd->cmd) {
2346	case ETHTOOL_GRXRINGS:
2347		cmd->data = adapter->num_rx_queues;
2348		ret = 0;
2349		break;
2350	case ETHTOOL_GRXFH:
2351		ret = igb_get_rss_hash_opts(adapter, cmd);
2352		break;
2353	default:
2354		break;
2355	}
2356
2357	return ret;
2358}
2359
2360#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
2361		       IGB_FLAG_RSS_FIELD_IPV6_UDP)
/* Configure which header fields feed the RSS hash (ETHTOOL_SRXFH).
 *
 * Only the UDP L4-port inclusion is actually tunable; TCP must always
 * hash on IP + ports, and the remaining flow types on IP only — any
 * other request is rejected with -EINVAL.  When the UDP option changes,
 * the MRQC register is rewritten to match.
 */
static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
				struct ethtool_rxnfc *nfc)
{
	u32 flags = adapter->flags;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* TCP always hashes on IP addresses and both port halves */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		/* ports must be requested together or not at all */
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		/* these flow types hash on IP addresses only */
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags != adapter->flags) {
		struct e1000_hw *hw = &adapter->hw;
		u32 mrqc = rd32(E1000_MRQC);

		if ((flags & UDP_RSS_FLAGS) &&
		    !(adapter->flags & UDP_RSS_FLAGS))
			dev_err(&adapter->pdev->dev,
				"enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags = flags;

		/* Perform hash on these packet types */
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
			E1000_MRQC_RSS_FIELD_IPV4_TCP |
			E1000_MRQC_RSS_FIELD_IPV6 |
			E1000_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
			  E1000_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
			mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
			mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;

		wr32(E1000_MRQC, mrqc);
	}

	return 0;
}
2463
2464static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2465{
2466	struct igb_adapter *adapter = netdev_priv(dev);
2467	int ret = -EOPNOTSUPP;
2468
2469	switch (cmd->cmd) {
2470	case ETHTOOL_SRXFH:
2471		ret = igb_set_rss_hash_opt(adapter, cmd);
2472		break;
2473	default:
2474		break;
2475	}
2476
2477	return ret;
2478}
2479
2480static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2481{
2482	struct igb_adapter *adapter = netdev_priv(netdev);
2483	struct e1000_hw *hw = &adapter->hw;
2484	u32 ipcnfg, eeer;
2485
2486	if ((hw->mac.type < e1000_i350) ||
2487	    (hw->phy.media_type != e1000_media_type_copper))
2488		return -EOPNOTSUPP;
2489
2490	edata->supported = (SUPPORTED_1000baseT_Full |
2491			    SUPPORTED_100baseT_Full);
2492
2493	ipcnfg = rd32(E1000_IPCNFG);
2494	eeer = rd32(E1000_EEER);
2495
2496	/* EEE status on negotiated link */
2497	if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
2498		edata->advertised = ADVERTISED_1000baseT_Full;
2499
2500	if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
2501		edata->advertised |= ADVERTISED_100baseT_Full;
2502
2503	if (eeer & E1000_EEER_EEE_NEG)
2504		edata->eee_active = true;
2505
2506	edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
2507
2508	if (eeer & E1000_EEER_TX_LPI_EN)
2509		edata->tx_lpi_enabled = true;
2510
2511	/* Report correct negotiated EEE status for devices that
2512	 * wrongly report EEE at half-duplex
2513	 */
2514	if (adapter->link_duplex == HALF_DUPLEX) {
2515		edata->eee_enabled = false;
2516		edata->eee_active = false;
2517		edata->tx_lpi_enabled = false;
2518		edata->advertised &= ~edata->advertised;
2519	}
2520
2521	return 0;
2522}
2523
2524static int igb_set_eee(struct net_device *netdev,
2525		       struct ethtool_eee *edata)
2526{
2527	struct igb_adapter *adapter = netdev_priv(netdev);
2528	struct e1000_hw *hw = &adapter->hw;
2529	struct ethtool_eee eee_curr;
2530	s32 ret_val;
2531
2532	if ((hw->mac.type < e1000_i350) ||
2533	    (hw->phy.media_type != e1000_media_type_copper))
2534		return -EOPNOTSUPP;
2535
2536	ret_val = igb_get_eee(netdev, &eee_curr);
2537	if (ret_val)
2538		return ret_val;
2539
2540	if (eee_curr.eee_enabled) {
2541		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
2542			dev_err(&adapter->pdev->dev,
2543				"Setting EEE tx-lpi is not supported\n");
2544			return -EINVAL;
2545		}
2546
2547		/* Tx LPI timer is not implemented currently */
2548		if (edata->tx_lpi_timer) {
2549			dev_err(&adapter->pdev->dev,
2550				"Setting EEE Tx LPI timer is not supported\n");
2551			return -EINVAL;
2552		}
2553
2554		if (eee_curr.advertised != edata->advertised) {
2555			dev_err(&adapter->pdev->dev,
2556				"Setting EEE Advertisement is not supported\n");
2557			return -EINVAL;
2558		}
2559
2560	} else if (!edata->eee_enabled) {
2561		dev_err(&adapter->pdev->dev,
2562			"Setting EEE options are not supported with EEE disabled\n");
2563			return -EINVAL;
2564		}
2565
2566	if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
2567		hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
2568		igb_set_eee_i350(hw);
2569
2570		/* reset link */
2571		if (!netif_running(netdev))
2572			igb_reset(adapter);
2573	}
2574
2575	return 0;
2576}
2577
2578static int igb_ethtool_begin(struct net_device *netdev)
2579{
2580	struct igb_adapter *adapter = netdev_priv(netdev);
2581	pm_runtime_get_sync(&adapter->pdev->dev);
2582	return 0;
2583}
2584
2585static void igb_ethtool_complete(struct net_device *netdev)
2586{
2587	struct igb_adapter *adapter = netdev_priv(netdev);
2588	pm_runtime_put(&adapter->pdev->dev);
2589}
2590
/* ethtool operations table, installed by igb_set_ethtool_ops().
 * begin/complete bracket every operation with runtime-PM get/put.
 */
static const struct ethtool_ops igb_ethtool_ops = {
	.get_settings           = igb_get_settings,
	.set_settings           = igb_set_settings,
	.get_drvinfo            = igb_get_drvinfo,
	.get_regs_len           = igb_get_regs_len,
	.get_regs               = igb_get_regs,
	.get_wol                = igb_get_wol,
	.set_wol                = igb_set_wol,
	.get_msglevel           = igb_get_msglevel,
	.set_msglevel           = igb_set_msglevel,
	.nway_reset             = igb_nway_reset,
	.get_link               = igb_get_link,
	.get_eeprom_len         = igb_get_eeprom_len,
	.get_eeprom             = igb_get_eeprom,
	.set_eeprom             = igb_set_eeprom,
	.get_ringparam          = igb_get_ringparam,
	.set_ringparam          = igb_set_ringparam,
	.get_pauseparam         = igb_get_pauseparam,
	.set_pauseparam         = igb_set_pauseparam,
	.self_test              = igb_diag_test,
	.get_strings            = igb_get_strings,
	.set_phys_id            = igb_set_phys_id,
	.get_sset_count         = igb_get_sset_count,
	.get_ethtool_stats      = igb_get_ethtool_stats,
	.get_coalesce           = igb_get_coalesce,
	.set_coalesce           = igb_set_coalesce,
	.get_ts_info            = igb_get_ts_info,
	.get_rxnfc		= igb_get_rxnfc,
	.set_rxnfc		= igb_set_rxnfc,
	.get_eee		= igb_get_eee,
	.set_eee		= igb_set_eee,
	.begin			= igb_ethtool_begin,
	.complete		= igb_ethtool_complete,
};
2625
/* Attach this driver's ethtool_ops to the net_device at probe time. */
void igb_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
}
2630