igb_main.c revision f9d40f6a9921cc7d9385f64362314054e22152bd
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 4
#define MIN 1
#define BUILD 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
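/* With the MAJ/MIN/BUILD values above, DRV_VERSION expands to "4.1.2-k". */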
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
				"Copyright (c) 2007-2013 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
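/* Note: a negative value (the -1 default) selects DEFAULT_MSG_ENABLE,
 * assuming the usual netif_msg_init() convention in which the parameter is
 * otherwise treated as the number of message-class bits to enable.
 */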

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}
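/* For the array registers above this prints a single line covering all four
 * queue copies, e.g. (illustrative values only):
 *   RDLEN[0-3]      00001000 00001000 00001000 00001000
 */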

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
					  DUMP_PREFIX_ADDRESS,
					  16, 1,
					  page_address(buffer_info->page) +
						      buffer_info->page_offset,
					  IGB_RX_BUFSZ, true);
				}
			}
		}
	}

exit:
	return;
}

/*  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: pointer to the driver's adapter structure
 *
 *  Returns the I2C data bit value
 */
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return ((i2cctl & E1000_I2C_DATA_IN) != 0);
}

/* igb_set_i2c_data - Sets the I2C data bit
 *  @data: pointer to the driver's adapter structure
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 */
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/* igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: pointer to the driver's adapter structure
 *  @state: state to set clock
 *
 *  Sets the I2C clock line to state
 */
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/* igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: pointer to the driver's adapter structure
 *
 *  Gets the I2C clock state
 */
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return ((i2cctl & E1000_I2C_CLK_IN) != 0);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
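/* With i2c-algo-bit, .udelay is the SCL half-period in microseconds, so a
 * value of 5 clocks the bus at roughly 100 kHz; .timeout is in jiffies.
 */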

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
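/* Q_IDX_82576() interleaves indices across the two queue banks of the 82576:
 * 0->0, 1->8, 2->1, 3->9, 4->2, ... matching the VF queue layout described
 * in igb_cache_ring_register() below.
 */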
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset in IVAR, should be a multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing a cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
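		/* Worked example (illustrative): reg_idx 10 yields row
		 * 10 & 0x7 = 2 and column offset (10 & 0x8) << 1 = 16, i.e.
		 * byte 2 of IVAR register 2; the Tx cause adds 8 to the offset.
		 */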
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
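		/* Worked example (illustrative): reg_idx 5 yields row
		 * 5 >> 1 = 2 and column offset (5 & 0x1) << 4 = 16, with the
		 * matching Tx cause at offset 24 in the same register.
		 */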
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		                      E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
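	/* Example (illustrative): with rss_queues = 4, no VFs in use, and
	 * queue pairs disabled this requests 4 Rx + 4 Tx + 1 link vector = 9;
	 * with queue pairs enabled it is 4 + 1 = 5.
	 */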
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count, size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct igb_q_vector) +
	       (sizeof(struct igb_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
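 *
 * For example (illustrative), with 8 q_vectors serving 4 Rx and 4 Tx queues
 * every ring gets a dedicated vector; with only 4 q_vectors each vector is
 * shared by one Tx and one Rx ring.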
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
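		/* Worked example (hypothetical 9018-byte max frame):
		 * min_tx_space = (9018 + 16 - 4) * 2 = 18060 -> 18432 after
		 * alignment -> 18 KB; min_rx_space = 9018 -> 9216 -> 9 KB.
		 */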
1740
1741		/* If current Tx allocation is less than the min Tx FIFO size,
1742		 * and the min Tx FIFO size is less than the current Rx FIFO
1743		 * allocation, take space away from current Rx allocation */
1744		if (tx_space < min_tx_space &&
1745		    ((min_tx_space - tx_space) < pba)) {
1746			pba = pba - (min_tx_space - tx_space);
1747
1748			/* if short on rx space, rx wins and must trump tx
1749			 * adjustment */
1750			if (pba < min_rx_space)
1751				pba = min_rx_space;
1752		}
1753		wr32(E1000_PBA, pba);
1754	}
1755
1756	/* flow control settings */
1757	/* The high water mark must be low enough to fit one full frame
1758	 * (or the size used for early receive) above it in the Rx FIFO.
1759	 * Set it to the lower of:
1760	 * - 90% of the Rx FIFO size, or
1761	 * - the full Rx FIFO size minus one full frame */
1762	hwm = min(((pba << 10) * 9 / 10),
1763			((pba << 10) - 2 * adapter->max_frame_size));
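	/* For illustration: with a 34 KB Rx PBA and a 1522-byte max frame,
	 * 90% of the FIFO is 31334 bytes while the FIFO minus two frames is
	 * 31772, so hwm = 31334; the mask below rounds it down to 31328 to
	 * honor the 16-byte register granularity.
	 */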
1764
1765	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
1766	fc->low_water = fc->high_water - 16;
1767	fc->pause_time = 0xFFFF;
1768	fc->send_xon = 1;
1769	fc->current_mode = fc->requested_mode;
1770
1771	/* disable receive for all VFs */
1772	if (adapter->vfs_allocated_count) {
1773		int i;
1774		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1775			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
1776
1777		/* ping all the active vfs to let them know we are going down */
1778		igb_ping_all_vfs(adapter);
1779
1780		/* disable transmits and receives */
1781		wr32(E1000_VFRE, 0);
1782		wr32(E1000_VFTE, 0);
1783	}
1784
1785	/* Allow time for pending master requests to run */
1786	hw->mac.ops.reset_hw(hw);
1787	wr32(E1000_WUC, 0);
1788
1789	if (hw->mac.ops.init_hw(hw))
1790		dev_err(&pdev->dev, "Hardware Error\n");
1791
1792	/*
1793	 * Flow control settings reset on hardware reset, so guarantee flow
1794	 * control is off when forcing speed.
1795	 */
1796	if (!hw->mac.autoneg)
1797		igb_force_mac_fc(hw);
1798
1799	igb_init_dmac(adapter, pba);
1800#ifdef CONFIG_IGB_HWMON
1801	/* Re-initialize the thermal sensor on i350 devices. */
1802	if (!test_bit(__IGB_DOWN, &adapter->state)) {
1803		if (mac->type == e1000_i350 && hw->bus.func == 0) {
1804			/* If present, re-initialize the external thermal sensor
1805			 * interface.
1806			 */
1807			if (adapter->ets)
1808				mac->ops.init_thermal_sensor_thresh(hw);
1809		}
1810	}
1811#endif
1812	if (!netif_running(adapter->netdev))
1813		igb_power_down_link(adapter);
1814
1815	igb_update_mng_vlan(adapter);
1816
1817	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1818	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1819
1820	/* Re-enable PTP, where applicable. */
1821	igb_ptp_reset(adapter);
1822
1823	igb_get_phy_info(hw);
1824}
1825
1826static netdev_features_t igb_fix_features(struct net_device *netdev,
1827	netdev_features_t features)
1828{
1829	/*
1830	 * Since there is no support for separate Rx/Tx VLAN accel
1831	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
1832	 */
1833	if (features & NETIF_F_HW_VLAN_RX)
1834		features |= NETIF_F_HW_VLAN_TX;
1835	else
1836		features &= ~NETIF_F_HW_VLAN_TX;
1837
1838	return features;
1839}
1840
1841static int igb_set_features(struct net_device *netdev,
1842	netdev_features_t features)
1843{
1844	netdev_features_t changed = netdev->features ^ features;
1845	struct igb_adapter *adapter = netdev_priv(netdev);
1846
1847	if (changed & NETIF_F_HW_VLAN_RX)
1848		igb_vlan_mode(netdev, features);
1849
1850	if (!(changed & NETIF_F_RXALL))
1851		return 0;
1852
1853	netdev->features = features;
1854
1855	if (netif_running(netdev))
1856		igb_reinit_locked(adapter);
1857	else
1858		igb_reset(adapter);
1859
1860	return 0;
1861}
1862
1863static const struct net_device_ops igb_netdev_ops = {
1864	.ndo_open		= igb_open,
1865	.ndo_stop		= igb_close,
1866	.ndo_start_xmit		= igb_xmit_frame,
1867	.ndo_get_stats64	= igb_get_stats64,
1868	.ndo_set_rx_mode	= igb_set_rx_mode,
1869	.ndo_set_mac_address	= igb_set_mac,
1870	.ndo_change_mtu		= igb_change_mtu,
1871	.ndo_do_ioctl		= igb_ioctl,
1872	.ndo_tx_timeout		= igb_tx_timeout,
1873	.ndo_validate_addr	= eth_validate_addr,
1874	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
1875	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
1876	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
1877	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
1878	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
1879	.ndo_get_vf_config	= igb_ndo_get_vf_config,
1880#ifdef CONFIG_NET_POLL_CONTROLLER
1881	.ndo_poll_controller	= igb_netpoll,
1882#endif
1883	.ndo_fix_features	= igb_fix_features,
1884	.ndo_set_features	= igb_set_features,
1885};
1886
1887/**
1888 * igb_set_fw_version - Configure version string for ethtool
1889 * @adapter: adapter struct
1890 *
1891 **/
1892void igb_set_fw_version(struct igb_adapter *adapter)
1893{
1894	struct e1000_hw *hw = &adapter->hw;
1895	struct e1000_fw_version fw;
1896
1897	igb_get_fw_version(hw, &fw);
1898
1899	switch (hw->mac.type) {
1900	case e1000_i211:
1901		snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1902			 "%2d.%2d-%d",
1903			 fw.invm_major, fw.invm_minor, fw.invm_img_type);
1904		break;
1905
1906	default:
1907		/* if option rom is valid, display its version too */
1908		if (fw.or_valid) {
1909			snprintf(adapter->fw_version,
1910				 sizeof(adapter->fw_version),
1911				 "%d.%d, 0x%08x, %d.%d.%d",
1912				 fw.eep_major, fw.eep_minor, fw.etrack_id,
1913				 fw.or_major, fw.or_build, fw.or_patch);
1914		/* no option rom */
1915		} else {
1916			snprintf(adapter->fw_version,
1917				 sizeof(adapter->fw_version),
1918				 "%d.%d, 0x%08x",
1919				 fw.eep_major, fw.eep_minor, fw.etrack_id);
1920		}
1921		break;
1922	}
1923	return;
1924}
1925
1926	/**
1927	 *  igb_init_i2c - Init I2C interface
1928	 *  @adapter: pointer to adapter structure
1929	 **/
1930static s32 igb_init_i2c(struct igb_adapter *adapter)
1931{
1932	s32 status = E1000_SUCCESS;
1933
1934	/* I2C interface supported on i350 devices */
1935	if (adapter->hw.mac.type != e1000_i350)
1936		return E1000_SUCCESS;
1937
1938	/* Initialize the i2c bus which is controlled by the registers.
1939	 * This bus will use the i2c_algo_bit structure that implements
1940	 * the protocol through toggling of the 4 bits in the register.
1941	 */
1942	adapter->i2c_adap.owner = THIS_MODULE;
1943	adapter->i2c_algo = igb_i2c_algo;
1944	adapter->i2c_algo.data = adapter;
1945	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
1946	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
1947	strlcpy(adapter->i2c_adap.name, "igb BB",
1948		sizeof(adapter->i2c_adap.name));
1949	status = i2c_bit_add_bus(&adapter->i2c_adap);
1950	return status;
1951}
1952
1953/**
1954 * igb_probe - Device Initialization Routine
1955 * @pdev: PCI device information struct
1956 * @ent: entry in igb_pci_tbl
1957 *
1958 * Returns 0 on success, negative on failure
1959 *
1960 * igb_probe initializes an adapter identified by a pci_dev structure.
1961 * The OS initialization, configuring of the adapter private structure,
1962 * and a hardware reset occur.
1963 **/
1964static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1965{
1966	struct net_device *netdev;
1967	struct igb_adapter *adapter;
1968	struct e1000_hw *hw;
1969	u16 eeprom_data = 0;
1970	s32 ret_val;
1971	static int global_quad_port_a; /* global quad port a indication */
1972	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1973	unsigned long mmio_start, mmio_len;
1974	int err, pci_using_dac;
1975	u8 part_str[E1000_PBANUM_LENGTH];
1976
1977	/* Catch broken hardware that put the wrong VF device ID in
1978	 * the PCIe SR-IOV capability.
1979	 */
1980	if (pdev->is_virtfn) {
1981		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1982			pci_name(pdev), pdev->vendor, pdev->device);
1983		return -EINVAL;
1984	}
1985
1986	err = pci_enable_device_mem(pdev);
1987	if (err)
1988		return err;
1989
1990	pci_using_dac = 0;
1991	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1992	if (!err) {
1993		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1994		if (!err)
1995			pci_using_dac = 1;
1996	} else {
1997		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1998		if (err) {
1999			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
2000			if (err) {
2001				dev_err(&pdev->dev,
2002					"No usable DMA configuration, aborting\n");
2003				goto err_dma;
2004			}
2005		}
2006	}
2007
2008	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
2009	                                   IORESOURCE_MEM),
2010	                                   igb_driver_name);
2011	if (err)
2012		goto err_pci_reg;
2013
2014	pci_enable_pcie_error_reporting(pdev);
2015
2016	pci_set_master(pdev);
2017	pci_save_state(pdev);
2018
2019	err = -ENOMEM;
2020	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
2021				   IGB_MAX_TX_QUEUES);
2022	if (!netdev)
2023		goto err_alloc_etherdev;
2024
2025	SET_NETDEV_DEV(netdev, &pdev->dev);
2026
2027	pci_set_drvdata(pdev, netdev);
2028	adapter = netdev_priv(netdev);
2029	adapter->netdev = netdev;
2030	adapter->pdev = pdev;
2031	hw = &adapter->hw;
2032	hw->back = adapter;
2033	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2034
2035	mmio_start = pci_resource_start(pdev, 0);
2036	mmio_len = pci_resource_len(pdev, 0);
2037
2038	err = -EIO;
2039	hw->hw_addr = ioremap(mmio_start, mmio_len);
2040	if (!hw->hw_addr)
2041		goto err_ioremap;
2042
2043	netdev->netdev_ops = &igb_netdev_ops;
2044	igb_set_ethtool_ops(netdev);
2045	netdev->watchdog_timeo = 5 * HZ;
2046
2047	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2048
2049	netdev->mem_start = mmio_start;
2050	netdev->mem_end = mmio_start + mmio_len;
2051
2052	/* PCI config space info */
2053	hw->vendor_id = pdev->vendor;
2054	hw->device_id = pdev->device;
2055	hw->revision_id = pdev->revision;
2056	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2057	hw->subsystem_device_id = pdev->subsystem_device;
2058
2059	/* Copy the default MAC, PHY and NVM function pointers */
2060	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
2061	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
2062	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
2063	/* Initialize skew-specific constants */
2064	err = ei->get_invariants(hw);
2065	if (err)
2066		goto err_sw_init;
2067
2068	/* setup the private structure */
2069	err = igb_sw_init(adapter);
2070	if (err)
2071		goto err_sw_init;
2072
2073	igb_get_bus_info_pcie(hw);
2074
2075	hw->phy.autoneg_wait_to_complete = false;
2076
2077	/* Copper options */
2078	if (hw->phy.media_type == e1000_media_type_copper) {
2079		hw->phy.mdix = AUTO_ALL_MODES;
2080		hw->phy.disable_polarity_correction = false;
2081		hw->phy.ms_type = e1000_ms_hw_default;
2082	}
2083
2084	if (igb_check_reset_block(hw))
2085		dev_info(&pdev->dev,
2086			"PHY reset is blocked due to SOL/IDER session.\n");
2087
2088	/*
2089	 * features is initialized to 0 at allocation, but it might have bits
2090	 * set by igb_sw_init, so we should use an OR instead of an
2091	 * assignment.
2092	 */
2093	netdev->features |= NETIF_F_SG |
2094			    NETIF_F_IP_CSUM |
2095			    NETIF_F_IPV6_CSUM |
2096			    NETIF_F_TSO |
2097			    NETIF_F_TSO6 |
2098			    NETIF_F_RXHASH |
2099			    NETIF_F_RXCSUM |
2100			    NETIF_F_HW_VLAN_RX |
2101			    NETIF_F_HW_VLAN_TX;
2102
2103	/* copy netdev features into list of user selectable features */
2104	netdev->hw_features |= netdev->features;
2105	netdev->hw_features |= NETIF_F_RXALL;
2106
2107	/* set this bit last since it cannot be part of hw_features */
2108	netdev->features |= NETIF_F_HW_VLAN_FILTER;
2109
2110	netdev->vlan_features |= NETIF_F_TSO |
2111				 NETIF_F_TSO6 |
2112				 NETIF_F_IP_CSUM |
2113				 NETIF_F_IPV6_CSUM |
2114				 NETIF_F_SG;
2115
2116	netdev->priv_flags |= IFF_SUPP_NOFCS;
2117
2118	if (pci_using_dac) {
2119		netdev->features |= NETIF_F_HIGHDMA;
2120		netdev->vlan_features |= NETIF_F_HIGHDMA;
2121	}
2122
2123	if (hw->mac.type >= e1000_82576) {
2124		netdev->hw_features |= NETIF_F_SCTP_CSUM;
2125		netdev->features |= NETIF_F_SCTP_CSUM;
2126	}
2127
2128	netdev->priv_flags |= IFF_UNICAST_FLT;
2129
2130	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
2131
2132	/* before reading the NVM, reset the controller to put the device in a
2133	 * known good starting state */
2134	hw->mac.ops.reset_hw(hw);
2135
2136	/*
2137	 * make sure the NVM is good; i211 parts have special NVM that
2138	 * doesn't contain a checksum
2139	 */
2140	if (hw->mac.type != e1000_i211) {
2141		if (hw->nvm.ops.validate(hw) < 0) {
2142			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2143			err = -EIO;
2144			goto err_eeprom;
2145		}
2146	}
2147
2148	/* copy the MAC address out of the NVM */
2149	if (hw->mac.ops.read_mac_addr(hw))
2150		dev_err(&pdev->dev, "NVM Read Error\n");
2151
2152	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2153
2154	if (!is_valid_ether_addr(netdev->dev_addr)) {
2155		dev_err(&pdev->dev, "Invalid MAC Address\n");
2156		err = -EIO;
2157		goto err_eeprom;
2158	}
2159
2160	/* get firmware version for ethtool -i */
2161	igb_set_fw_version(adapter);
2162
2163	setup_timer(&adapter->watchdog_timer, igb_watchdog,
2164	            (unsigned long) adapter);
2165	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
2166	            (unsigned long) adapter);
2167
2168	INIT_WORK(&adapter->reset_task, igb_reset_task);
2169	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2170
2171	/* Initialize link properties that are user-changeable */
2172	adapter->fc_autoneg = true;
2173	hw->mac.autoneg = true;
2174	hw->phy.autoneg_advertised = 0x2f;
2175
2176	hw->fc.requested_mode = e1000_fc_default;
2177	hw->fc.current_mode = e1000_fc_default;
2178
2179	igb_validate_mdi_setting(hw);
2180
2181	/* By default, support wake on port A */
2182	if (hw->bus.func == 0)
2183		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2184
2185	/* Check the NVM for wake support on non-port A ports */
2186	if (hw->mac.type >= e1000_82580)
2187		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2188		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2189		                 &eeprom_data);
2190	else if (hw->bus.func == 1)
2191		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2192
2193	if (eeprom_data & IGB_EEPROM_APME)
2194		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2195
2196	/* now that we have the eeprom settings, apply the special cases where
2197	 * the eeprom may be wrong or the board simply won't support wake on
2198	 * lan on a particular port */
2199	switch (pdev->device) {
2200	case E1000_DEV_ID_82575GB_QUAD_COPPER:
2201		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2202		break;
2203	case E1000_DEV_ID_82575EB_FIBER_SERDES:
2204	case E1000_DEV_ID_82576_FIBER:
2205	case E1000_DEV_ID_82576_SERDES:
2206		/* Wake events only supported on port A for dual fiber
2207		 * regardless of eeprom setting */
2208		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2209			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2210		break;
2211	case E1000_DEV_ID_82576_QUAD_COPPER:
2212	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
2213		/* if quad port adapter, disable WoL on all but port A */
2214		if (global_quad_port_a != 0)
2215			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2216		else
2217			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2218		/* Reset for multiple quad port adapters */
2219		if (++global_quad_port_a == 4)
2220			global_quad_port_a = 0;
2221		break;
2222	default:
2223		/* If the device can't wake, don't set software support */
2224		if (!device_can_wakeup(&adapter->pdev->dev))
2225			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2226	}
2227
2228	/* initialize the wol settings based on the eeprom settings */
2229	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2230		adapter->wol |= E1000_WUFC_MAG;
2231
2232	/* Some vendors want WoL disabled by default, but still supported */
2233	if ((hw->mac.type == e1000_i350) &&
2234	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2235		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2236		adapter->wol = 0;
2237	}
2238
2239	device_set_wakeup_enable(&adapter->pdev->dev,
2240				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
2241
2242	/* reset the hardware with the new settings */
2243	igb_reset(adapter);
2244
2245	/* Init the I2C interface */
2246	err = igb_init_i2c(adapter);
2247	if (err) {
2248		dev_err(&pdev->dev, "failed to init i2c interface\n");
2249		goto err_eeprom;
2250	}
2251
2252	/* let the f/w know that the h/w is now under the control of the
2253	 * driver. */
2254	igb_get_hw_control(adapter);
2255
2256	strcpy(netdev->name, "eth%d");
2257	err = register_netdev(netdev);
2258	if (err)
2259		goto err_register;
2260
2261	/* carrier off reporting is important to ethtool even BEFORE open */
2262	netif_carrier_off(netdev);
2263
2264#ifdef CONFIG_IGB_DCA
2265	if (dca_add_requester(&pdev->dev) == 0) {
2266		adapter->flags |= IGB_FLAG_DCA_ENABLED;
2267		dev_info(&pdev->dev, "DCA enabled\n");
2268		igb_setup_dca(adapter);
2269	}
2270
2271#endif
2272#ifdef CONFIG_IGB_HWMON
2273	/* Initialize the thermal sensor on i350 devices. */
2274	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2275		u16 ets_word;
2276
2277		/*
2278		 * Read the NVM to determine if this i350 device supports an
2279		 * external thermal sensor.
2280		 */
2281		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
2282		if (ets_word != 0x0000 && ets_word != 0xFFFF)
2283			adapter->ets = true;
2284		else
2285			adapter->ets = false;
2286		if (igb_sysfs_init(adapter))
2287			dev_err(&pdev->dev,
2288				"failed to allocate sysfs resources\n");
2289	} else {
2290		adapter->ets = false;
2291	}
2292#endif
2293	/* do hw tstamp init after resetting */
2294	igb_ptp_init(adapter);
2295
2296	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2297	/* print bus type/speed/width info */
2298	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
2299		 netdev->name,
2300		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2301		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2302		                                            "unknown"),
2303		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2304		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2305		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2306		   "unknown"),
2307		 netdev->dev_addr);
2308
2309	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2310	if (ret_val)
2311		strcpy(part_str, "Unknown");
2312	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
2313	dev_info(&pdev->dev,
2314		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2315		adapter->msix_entries ? "MSI-X" :
2316		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2317		adapter->num_rx_queues, adapter->num_tx_queues);
2318	switch (hw->mac.type) {
2319	case e1000_i350:
2320	case e1000_i210:
2321	case e1000_i211:
2322		igb_set_eee_i350(hw);
2323		break;
2324	default:
2325		break;
2326	}
2327
2328	pm_runtime_put_noidle(&pdev->dev);
2329	return 0;
2330
2331err_register:
2332	igb_release_hw_control(adapter);
2333	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
2334err_eeprom:
2335	if (!igb_check_reset_block(hw))
2336		igb_reset_phy(hw);
2337
2338	if (hw->flash_address)
2339		iounmap(hw->flash_address);
2340err_sw_init:
2341	igb_clear_interrupt_scheme(adapter);
2342	iounmap(hw->hw_addr);
2343err_ioremap:
2344	free_netdev(netdev);
2345err_alloc_etherdev:
2346	pci_release_selected_regions(pdev,
2347	                             pci_select_bars(pdev, IORESOURCE_MEM));
2348err_pci_reg:
2349err_dma:
2350	pci_disable_device(pdev);
2351	return err;
2352}
2353
2354#ifdef CONFIG_PCI_IOV
2355	static int igb_disable_sriov(struct pci_dev *pdev)
2356{
2357	struct net_device *netdev = pci_get_drvdata(pdev);
2358	struct igb_adapter *adapter = netdev_priv(netdev);
2359	struct e1000_hw *hw = &adapter->hw;
2360
2361	/* reclaim resources allocated to VFs */
2362	if (adapter->vf_data) {
2363		/* disable iov and allow time for transactions to clear */
2364		if (igb_vfs_are_assigned(adapter)) {
2365			dev_warn(&pdev->dev,
2366				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
2367			return -EPERM;
2368		} else {
2369			pci_disable_sriov(pdev);
2370			msleep(500);
2371		}
2372
2373		kfree(adapter->vf_data);
2374		adapter->vf_data = NULL;
2375		adapter->vfs_allocated_count = 0;
2376		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2377		wrfl();
2378		msleep(100);
2379		dev_info(&pdev->dev, "IOV Disabled\n");
2380
2381		/* Re-enable DMA Coalescing flag since IOV is turned off */
2382		adapter->flags |= IGB_FLAG_DMAC;
2383	}
2384
2385	return 0;
2386}
2387
2388static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
2389{
2390	struct net_device *netdev = pci_get_drvdata(pdev);
2391	struct igb_adapter *adapter = netdev_priv(netdev);
2392	int old_vfs = pci_num_vf(pdev);
2393	int err = 0;
2394	int i;
2395
2396	if (!num_vfs)
2397		goto out;
2398	else if (old_vfs && old_vfs == num_vfs)
2399		goto out;
2400	else if (old_vfs && old_vfs != num_vfs)
2401		err = igb_disable_sriov(pdev);
2402
2403	if (err)
2404		goto out;
2405
2406	if (num_vfs > 7) {
2407		err = -EPERM;
2408		goto out;
2409	}
2410
2411	adapter->vfs_allocated_count = num_vfs;
2412
2413	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2414				sizeof(struct vf_data_storage), GFP_KERNEL);
2415
2416	/* if allocation failed then we do not support SR-IOV */
2417	if (!adapter->vf_data) {
2418		adapter->vfs_allocated_count = 0;
2419		dev_err(&pdev->dev,
2420			"Unable to allocate memory for VF Data Storage\n");
2421		err = -ENOMEM;
2422		goto out;
2423	}
2424
2425	err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
2426	if (err)
2427		goto err_out;
2428
2429	dev_info(&pdev->dev, "%d VFs allocated\n",
2430		 adapter->vfs_allocated_count);
2431	for (i = 0; i < adapter->vfs_allocated_count; i++)
2432		igb_vf_configure(adapter, i);
2433
2434	/* DMA Coalescing is not supported in IOV mode. */
2435	adapter->flags &= ~IGB_FLAG_DMAC;
2436	goto out;
2437
2438err_out:
2439	kfree(adapter->vf_data);
2440	adapter->vf_data = NULL;
2441	adapter->vfs_allocated_count = 0;
2442out:
2443	return err;
2444}
2445
2446#endif
2447	/**
2448	 *  igb_remove_i2c - Cleanup I2C interface
2449	 *  @adapter: pointer to adapter structure
2450	 *
2451	 **/
2452static void igb_remove_i2c(struct igb_adapter *adapter)
2453{
2454
2455	/* free the adapter bus structure */
2456	i2c_del_adapter(&adapter->i2c_adap);
2457}
2458
2459/**
2460 * igb_remove - Device Removal Routine
2461 * @pdev: PCI device information struct
2462 *
2463 * igb_remove is called by the PCI subsystem to alert the driver
2464	 * that it should release a PCI device.  This could be caused by a
2465 * Hot-Plug event, or because the driver is going to be removed from
2466 * memory.
2467 **/
2468static void igb_remove(struct pci_dev *pdev)
2469{
2470	struct net_device *netdev = pci_get_drvdata(pdev);
2471	struct igb_adapter *adapter = netdev_priv(netdev);
2472	struct e1000_hw *hw = &adapter->hw;
2473
2474	pm_runtime_get_noresume(&pdev->dev);
2475#ifdef CONFIG_IGB_HWMON
2476	igb_sysfs_exit(adapter);
2477#endif
2478	igb_remove_i2c(adapter);
2479	igb_ptp_stop(adapter);
2480	/*
2481	 * The watchdog timer may be rescheduled, so explicitly
2482	 * prevent it from being rescheduled.
2483	 */
2484	set_bit(__IGB_DOWN, &adapter->state);
2485	del_timer_sync(&adapter->watchdog_timer);
2486	del_timer_sync(&adapter->phy_info_timer);
2487
2488	cancel_work_sync(&adapter->reset_task);
2489	cancel_work_sync(&adapter->watchdog_task);
2490
2491#ifdef CONFIG_IGB_DCA
2492	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
2493		dev_info(&pdev->dev, "DCA disabled\n");
2494		dca_remove_requester(&pdev->dev);
2495		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
2496		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
2497	}
2498#endif
2499
2500	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
2501	 * would have already happened in close and is redundant. */
2502	igb_release_hw_control(adapter);
2503
2504	unregister_netdev(netdev);
2505
2506	igb_clear_interrupt_scheme(adapter);
2507
2508#ifdef CONFIG_PCI_IOV
2509	igb_disable_sriov(pdev);
2510#endif
2511
2512	iounmap(hw->hw_addr);
2513	if (hw->flash_address)
2514		iounmap(hw->flash_address);
2515	pci_release_selected_regions(pdev,
2516	                             pci_select_bars(pdev, IORESOURCE_MEM));
2517
2518	kfree(adapter->shadow_vfta);
2519	free_netdev(netdev);
2520
2521	pci_disable_pcie_error_reporting(pdev);
2522
2523	pci_disable_device(pdev);
2524}
2525
2526/**
2527 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2528 * @adapter: board private structure to initialize
2529 *
2530 * This function initializes the vf specific data storage and then attempts to
2531 * This function initializes the VF-specific data storage and then attempts to
2532 * allocate the VFs.  It is ordered this way because it is much
2533 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2534 **/
2535static void igb_probe_vfs(struct igb_adapter *adapter)
2536{
2537#ifdef CONFIG_PCI_IOV
2538	struct pci_dev *pdev = adapter->pdev;
2539	struct e1000_hw *hw = &adapter->hw;
2540
2541	/* Virtualization features not supported on i210 family. */
2542	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2543		return;
2544
2545	pci_sriov_set_totalvfs(pdev, 7);
2546	igb_enable_sriov(pdev, max_vfs);
2547
2548#endif /* CONFIG_PCI_IOV */
2549}
2550
2551static void igb_init_queue_configuration(struct igb_adapter *adapter)
2552{
2553	struct e1000_hw *hw = &adapter->hw;
2554	u32 max_rss_queues;
2555
2556	/* Determine the maximum number of RSS queues supported. */
2557	switch (hw->mac.type) {
2558	case e1000_i211:
2559		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2560		break;
2561	case e1000_82575:
2562	case e1000_i210:
2563		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2564		break;
2565	case e1000_i350:
2566		/* I350 cannot do RSS and SR-IOV at the same time */
2567		if (!!adapter->vfs_allocated_count) {
2568			max_rss_queues = 1;
2569			break;
2570		}
2571		/* fall through */
2572	case e1000_82576:
2573		if (!!adapter->vfs_allocated_count) {
2574			max_rss_queues = 2;
2575			break;
2576		}
2577		/* fall through */
2578	case e1000_82580:
2579	default:
2580		max_rss_queues = IGB_MAX_RX_QUEUES;
2581		break;
2582	}
2583
2584	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
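	/* e.g. on a dual-core system this caps rss_queues at 2 even when the
	 * MAC could support more */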
2585
2586	/* Determine if we need to pair queues. */
2587	switch (hw->mac.type) {
2588	case e1000_82575:
2589	case e1000_i211:
2590		/* Device supports enough interrupts without queue pairing. */
2591		break;
2592	case e1000_82576:
2593		/*
2594		 * If VFs are going to be allocated with RSS queues then we
2595		 * should pair the queues in order to conserve interrupts due
2596		 * to limited supply.
2597		 */
2598		if ((adapter->rss_queues > 1) &&
2599		    (adapter->vfs_allocated_count > 6))
2600			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2601		/* fall through */
2602	case e1000_82580:
2603	case e1000_i350:
2604	case e1000_i210:
2605	default:
2606		/*
2607		 * If rss_queues > half of max_rss_queues, pair the queues in
2608		 * order to conserve interrupts due to limited supply.
2609		 */
2610		if (adapter->rss_queues > (max_rss_queues / 2))
2611			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2612		break;
2613	}
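	/* For illustration: on a MAC whose max_rss_queues is 8, queue
	 * pairing (one interrupt vector serving a Tx/Rx ring pair) kicks in
	 * once more than 4 RSS queues are in use.
	 */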
2614}
2615
2616/**
2617 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2618 * @adapter: board private structure to initialize
2619 *
2620 * igb_sw_init initializes the Adapter private data structure.
2621 * Fields are initialized based on PCI device information and
2622 * OS network device settings (MTU size).
2623 **/
2624static int igb_sw_init(struct igb_adapter *adapter)
2625{
2626	struct e1000_hw *hw = &adapter->hw;
2627	struct net_device *netdev = adapter->netdev;
2628	struct pci_dev *pdev = adapter->pdev;
2629
2630	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2631
2632	/* set default ring sizes */
2633	adapter->tx_ring_count = IGB_DEFAULT_TXD;
2634	adapter->rx_ring_count = IGB_DEFAULT_RXD;
2635
2636	/* set default ITR values */
2637	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2638	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2639
2640	/* set default work limits */
2641	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2642
2643	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2644				  VLAN_HLEN;
2645	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
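	/* e.g. the default 1500-byte MTU gives a max_frame_size of
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
	 * bytes */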
2646
2647	spin_lock_init(&adapter->stats64_lock);
2648#ifdef CONFIG_PCI_IOV
2649	switch (hw->mac.type) {
2650	case e1000_82576:
2651	case e1000_i350:
2652		if (max_vfs > 7) {
2653			dev_warn(&pdev->dev,
2654				 "Maximum of 7 VFs per PF, using max\n");
2655			max_vfs = adapter->vfs_allocated_count = 7;
2656		} else
2657			adapter->vfs_allocated_count = max_vfs;
2658		if (adapter->vfs_allocated_count)
2659			dev_warn(&pdev->dev,
2660				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
2661		break;
2662	default:
2663		break;
2664	}
2665#endif /* CONFIG_PCI_IOV */
2666
2667	igb_init_queue_configuration(adapter);
2668
2669	/* Setup and initialize a copy of the hw vlan table array */
2670	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
2671				       GFP_ATOMIC);
2672
2673	/* This call may decrease the number of queues */
2674	if (igb_init_interrupt_scheme(adapter, true)) {
2675		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2676		return -ENOMEM;
2677	}
2678
2679	igb_probe_vfs(adapter);
2680
2681	/* Explicitly disable IRQ since the NIC can be in any state. */
2682	igb_irq_disable(adapter);
2683
2684	if (hw->mac.type >= e1000_i350)
2685		adapter->flags &= ~IGB_FLAG_DMAC;
2686
2687	set_bit(__IGB_DOWN, &adapter->state);
2688	return 0;
2689}
2690
2691/**
2692 * igb_open - Called when a network interface is made active
2693 * @netdev: network interface device structure
2694 *
2695 * Returns 0 on success, negative value on failure
2696 *
2697 * The open entry point is called when a network interface is made
2698 * active by the system (IFF_UP).  At this point all resources needed
2699 * for transmit and receive operations are allocated, the interrupt
2700 * handler is registered with the OS, the watchdog timer is started,
2701 * and the stack is notified that the interface is ready.
2702 **/
2703static int __igb_open(struct net_device *netdev, bool resuming)
2704{
2705	struct igb_adapter *adapter = netdev_priv(netdev);
2706	struct e1000_hw *hw = &adapter->hw;
2707	struct pci_dev *pdev = adapter->pdev;
2708	int err;
2709	int i;
2710
2711	/* disallow open during test */
2712	if (test_bit(__IGB_TESTING, &adapter->state)) {
2713		WARN_ON(resuming);
2714		return -EBUSY;
2715	}
2716
2717	if (!resuming)
2718		pm_runtime_get_sync(&pdev->dev);
2719
2720	netif_carrier_off(netdev);
2721
2722	/* allocate transmit descriptors */
2723	err = igb_setup_all_tx_resources(adapter);
2724	if (err)
2725		goto err_setup_tx;
2726
2727	/* allocate receive descriptors */
2728	err = igb_setup_all_rx_resources(adapter);
2729	if (err)
2730		goto err_setup_rx;
2731
2732	igb_power_up_link(adapter);
2733
2734	/* before we allocate an interrupt, we must be ready to handle it.
2735	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2736	 * as soon as we call pci_request_irq, so we have to set up our
2737	 * clean_rx handler before we do so.  */
2738	igb_configure(adapter);
2739
2740	err = igb_request_irq(adapter);
2741	if (err)
2742		goto err_req_irq;
2743
2744	/* Notify the stack of the actual queue counts. */
2745	err = netif_set_real_num_tx_queues(adapter->netdev,
2746					   adapter->num_tx_queues);
2747	if (err)
2748		goto err_set_queues;
2749
2750	err = netif_set_real_num_rx_queues(adapter->netdev,
2751					   adapter->num_rx_queues);
2752	if (err)
2753		goto err_set_queues;
2754
2755	/* From here on the code is the same as igb_up() */
2756	clear_bit(__IGB_DOWN, &adapter->state);
2757
2758	for (i = 0; i < adapter->num_q_vectors; i++)
2759		napi_enable(&(adapter->q_vector[i]->napi));
2760
2761	/* Clear any pending interrupts. */
2762	rd32(E1000_ICR);
2763
2764	igb_irq_enable(adapter);
2765
2766	/* notify VFs that reset has been completed */
2767	if (adapter->vfs_allocated_count) {
2768		u32 reg_data = rd32(E1000_CTRL_EXT);
2769		reg_data |= E1000_CTRL_EXT_PFRSTD;
2770		wr32(E1000_CTRL_EXT, reg_data);
2771	}
2772
2773	netif_tx_start_all_queues(netdev);
2774
2775	if (!resuming)
2776		pm_runtime_put(&pdev->dev);
2777
2778	/* start the watchdog. */
2779	hw->mac.get_link_status = 1;
2780	schedule_work(&adapter->watchdog_task);
2781
2782	return 0;
2783
2784err_set_queues:
2785	igb_free_irq(adapter);
2786err_req_irq:
2787	igb_release_hw_control(adapter);
2788	igb_power_down_link(adapter);
2789	igb_free_all_rx_resources(adapter);
2790err_setup_rx:
2791	igb_free_all_tx_resources(adapter);
2792err_setup_tx:
2793	igb_reset(adapter);
2794	if (!resuming)
2795		pm_runtime_put(&pdev->dev);
2796
2797	return err;
2798}
2799
2800static int igb_open(struct net_device *netdev)
2801{
2802	return __igb_open(netdev, false);
2803}
2804
2805/**
2806 * igb_close - Disables a network interface
2807 * @netdev: network interface device structure
2808 *
2809 * Returns 0, this is not allowed to fail
2810 *
2811 * The close entry point is called when an interface is de-activated
2812 * by the OS.  The hardware is still under the driver's control, but
2813 * needs to be disabled.  A global MAC reset is issued to stop the
2814 * hardware, and all transmit and receive resources are freed.
2815 **/
2816static int __igb_close(struct net_device *netdev, bool suspending)
2817{
2818	struct igb_adapter *adapter = netdev_priv(netdev);
2819	struct pci_dev *pdev = adapter->pdev;
2820
2821	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2822
2823	if (!suspending)
2824		pm_runtime_get_sync(&pdev->dev);
2825
2826	igb_down(adapter);
2827	igb_free_irq(adapter);
2828
2829	igb_free_all_tx_resources(adapter);
2830	igb_free_all_rx_resources(adapter);
2831
2832	if (!suspending)
2833		pm_runtime_put_sync(&pdev->dev);
2834	return 0;
2835}
2836
2837static int igb_close(struct net_device *netdev)
2838{
2839	return __igb_close(netdev, false);
2840}
2841
2842/**
2843 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2844 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2845 *
2846 * Return 0 on success, negative on failure
2847 **/
2848int igb_setup_tx_resources(struct igb_ring *tx_ring)
2849{
2850	struct device *dev = tx_ring->dev;
2851	int size;
2852
2853	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
2854
2855	tx_ring->tx_buffer_info = vzalloc(size);
2856	if (!tx_ring->tx_buffer_info)
2857		goto err;
2858
2859	/* round up to nearest 4K */
2860	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2861	tx_ring->size = ALIGN(tx_ring->size, 4096);
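	/* for illustration, with 16-byte advanced Tx descriptors: a
	 * 256-entry ring is exactly 4096 bytes, while a 320-entry ring
	 * (5120 bytes) rounds up to 8192 */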
2862
2863	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2864					   &tx_ring->dma, GFP_KERNEL);
2865	if (!tx_ring->desc)
2866		goto err;
2867
2868	tx_ring->next_to_use = 0;
2869	tx_ring->next_to_clean = 0;
2870
2871	return 0;
2872
2873err:
2874	vfree(tx_ring->tx_buffer_info);
2875	tx_ring->tx_buffer_info = NULL;
2876	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
2877	return -ENOMEM;
2878}
2879
2880/**
2881 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2882 *				  (Descriptors) for all queues
2883 * @adapter: board private structure
2884 *
2885 * Return 0 on success, negative on failure
2886 **/
2887static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2888{
2889	struct pci_dev *pdev = adapter->pdev;
2890	int i, err = 0;
2891
2892	for (i = 0; i < adapter->num_tx_queues; i++) {
2893		err = igb_setup_tx_resources(adapter->tx_ring[i]);
2894		if (err) {
2895			dev_err(&pdev->dev,
2896				"Allocation for Tx Queue %u failed\n", i);
2897			for (i--; i >= 0; i--)
2898				igb_free_tx_resources(adapter->tx_ring[i]);
2899			break;
2900		}
2901	}
2902
2903	return err;
2904}
2905
2906/**
2907 * igb_setup_tctl - configure the transmit control registers
2908 * @adapter: Board private structure
2909 **/
2910void igb_setup_tctl(struct igb_adapter *adapter)
2911{
2912	struct e1000_hw *hw = &adapter->hw;
2913	u32 tctl;
2914
2915	/* disable queue 0 which is enabled by default on 82575 and 82576 */
2916	wr32(E1000_TXDCTL(0), 0);
2917
2918	/* Program the Transmit Control Register */
2919	tctl = rd32(E1000_TCTL);
2920	tctl &= ~E1000_TCTL_CT;
2921	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2922		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2923
2924	igb_config_collision_dist(hw);
2925
2926	/* Enable transmits */
2927	tctl |= E1000_TCTL_EN;
2928
2929	wr32(E1000_TCTL, tctl);
2930}
2931
2932/**
2933 * igb_configure_tx_ring - Configure transmit ring after Reset
2934 * @adapter: board private structure
2935 * @ring: tx ring to configure
2936 *
2937 * Configure a transmit ring after a reset.
2938 **/
2939void igb_configure_tx_ring(struct igb_adapter *adapter,
2940                           struct igb_ring *ring)
2941{
2942	struct e1000_hw *hw = &adapter->hw;
2943	u32 txdctl = 0;
2944	u64 tdba = ring->dma;
2945	int reg_idx = ring->reg_idx;
2946
2947	/* disable the queue */
2948	wr32(E1000_TXDCTL(reg_idx), 0);
2949	wrfl();
2950	mdelay(10);
2951
2952	wr32(E1000_TDLEN(reg_idx),
2953	                ring->count * sizeof(union e1000_adv_tx_desc));
2954	wr32(E1000_TDBAL(reg_idx),
2955	                tdba & 0x00000000ffffffffULL);
2956	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2957
2958	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2959	wr32(E1000_TDH(reg_idx), 0);
2960	writel(0, ring->tail);
2961
2962	txdctl |= IGB_TX_PTHRESH;
2963	txdctl |= IGB_TX_HTHRESH << 8;
2964	txdctl |= IGB_TX_WTHRESH << 16;
2965
2966	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2967	wr32(E1000_TXDCTL(reg_idx), txdctl);
2968}
2969
2970/**
2971 * igb_configure_tx - Configure transmit Unit after Reset
2972 * @adapter: board private structure
2973 *
2974 * Configure the Tx unit of the MAC after a reset.
2975 **/
2976static void igb_configure_tx(struct igb_adapter *adapter)
2977{
2978	int i;
2979
2980	for (i = 0; i < adapter->num_tx_queues; i++)
2981		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
2982}
2983
2984/**
2985 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2986 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2987 *
2988 * Returns 0 on success, negative on failure
2989 **/
2990int igb_setup_rx_resources(struct igb_ring *rx_ring)
2991{
2992	struct device *dev = rx_ring->dev;
2993	int size;
2994
2995	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
2996
2997	rx_ring->rx_buffer_info = vzalloc(size);
2998	if (!rx_ring->rx_buffer_info)
2999		goto err;
3000
3001	/* Round up to nearest 4K */
3002	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
3003	rx_ring->size = ALIGN(rx_ring->size, 4096);
3004
3005	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3006					   &rx_ring->dma, GFP_KERNEL);
3007	if (!rx_ring->desc)
3008		goto err;
3009
3010	rx_ring->next_to_alloc = 0;
3011	rx_ring->next_to_clean = 0;
3012	rx_ring->next_to_use = 0;
3013
3014	return 0;
3015
3016err:
3017	vfree(rx_ring->rx_buffer_info);
3018	rx_ring->rx_buffer_info = NULL;
3019	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
3020	return -ENOMEM;
3021}
3022
3023/**
3024 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3025 *				  (Descriptors) for all queues
3026 * @adapter: board private structure
3027 *
3028 * Return 0 on success, negative on failure
3029 **/
3030static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3031{
3032	struct pci_dev *pdev = adapter->pdev;
3033	int i, err = 0;
3034
3035	for (i = 0; i < adapter->num_rx_queues; i++) {
3036		err = igb_setup_rx_resources(adapter->rx_ring[i]);
3037		if (err) {
3038			dev_err(&pdev->dev,
3039				"Allocation for Rx Queue %u failed\n", i);
3040			for (i--; i >= 0; i--)
3041				igb_free_rx_resources(adapter->rx_ring[i]);
3042			break;
3043		}
3044	}
3045
3046	return err;
3047}
3048
3049/**
3050 * igb_setup_mrqc - configure the multiple receive queue control registers
3051 * @adapter: Board private structure
3052 **/
3053static void igb_setup_mrqc(struct igb_adapter *adapter)
3054{
3055	struct e1000_hw *hw = &adapter->hw;
3056	u32 mrqc, rxcsum;
3057	u32 j, num_rx_queues, shift = 0;
3058	static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
3059					0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
3060					0xA32DCB77, 0x0CF23080, 0x3BB7426A,
3061					0xFA01ACBE };
3062
3063	/* Fill out hash function seeds */
3064	for (j = 0; j < 10; j++)
3065		wr32(E1000_RSSRK(j), rsskey[j]);
3066
3067	num_rx_queues = adapter->rss_queues;
3068
3069	switch (hw->mac.type) {
3070	case e1000_82575:
3071		shift = 6;
3072		break;
3073	case e1000_82576:
3074		/* 82576 supports 2 RSS queues for SR-IOV */
3075		if (adapter->vfs_allocated_count) {
3076			shift = 3;
3077			num_rx_queues = 2;
3078		}
3079		break;
3080	default:
3081		break;
3082	}
3083
3084	/*
3085	 * Populate the indirection table 4 entries at a time.  To do this
3086	 * we are generating the results for n and n+2 and then interleaving
3087	 * those with the results for n+1 and n+3.
3088	 */
3089	for (j = 0; j < 32; j++) {
3090		/* first pass generates n and n+2 */
3091		u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
3092		u32 reta = (base & 0x07800780) >> (7 - shift);
3093
3094		/* second pass generates n+1 and n+3 */
3095		base += 0x00010001 * num_rx_queues;
3096		reta |= (base & 0x07800780) << (1 + shift);
3097
3098		wr32(E1000_RETA(j), reta);
3099	}
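	/* Sanity check for the default (shift = 0) case: each 8-bit RETA
	 * entry n works out to (n * num_rx_queues) >> 7, so with four queues
	 * entries 0-31 map to queue 0, 32-63 to queue 1, and so on, spreading
	 * the 128 table entries evenly across the queues.
	 */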
3100
3101	/*
3102	 * Disable raw packet checksumming so that RSS hash is placed in
3103	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
3104	 * offloads as they are enabled by default
3105	 */
3106	rxcsum = rd32(E1000_RXCSUM);
3107	rxcsum |= E1000_RXCSUM_PCSD;
3108
3109	if (adapter->hw.mac.type >= e1000_82576)
3110		/* Enable Receive Checksum Offload for SCTP */
3111		rxcsum |= E1000_RXCSUM_CRCOFL;
3112
3113	/* Don't need to set TUOFL or IPOFL, they default to 1 */
3114	wr32(E1000_RXCSUM, rxcsum);
3115
3116	/* Generate RSS hash based on packet types, TCP/UDP
3117	 * port numbers and/or IPv4/v6 src and dst addresses
3118	 */
3119	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3120	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
3121	       E1000_MRQC_RSS_FIELD_IPV6 |
3122	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
3123	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
3124
3125	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3126		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3127	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3128		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3129
3130	/* If VMDq is enabled then we set the appropriate mode for that, else
3131	 * we default to RSS so that an RSS hash is calculated per packet even
3132	 * if we are only using one queue */
3133	if (adapter->vfs_allocated_count) {
3134		if (hw->mac.type > e1000_82575) {
3135			/* Set the default pool for the PF's first queue */
3136			u32 vtctl = rd32(E1000_VT_CTL);
3137			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3138				   E1000_VT_CTL_DISABLE_DEF_POOL);
3139			vtctl |= adapter->vfs_allocated_count <<
3140				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3141			wr32(E1000_VT_CTL, vtctl);
3142		}
3143		if (adapter->rss_queues > 1)
3144			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
3145		else
3146			mrqc |= E1000_MRQC_ENABLE_VMDQ;
3147	} else {
3148		if (hw->mac.type != e1000_i211)
3149			mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
3150	}
3151	igb_vmm_control(adapter);
3152
3153	wr32(E1000_MRQC, mrqc);
3154}
3155
3156/**
3157 * igb_setup_rctl - configure the receive control registers
3158 * @adapter: Board private structure
3159 **/
3160void igb_setup_rctl(struct igb_adapter *adapter)
3161{
3162	struct e1000_hw *hw = &adapter->hw;
3163	u32 rctl;
3164
3165	rctl = rd32(E1000_RCTL);
3166
3167	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3168	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
3169
3170	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
3171		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
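	/* mc_filter_type selects which 12 bits of a multicast address index
	 * the 4096-bit MTA hash; type 0, the usual default, is generally
	 * documented in the e1000 family as using address bits [47:36].
	 */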
3172
3173	/*
3174	 * enable stripping of CRC. It's unlikely this will break BMC
3175	 * redirection as it did with e1000. Newer features require
3176	 * that the HW strips the CRC.
3177	 */
3178	rctl |= E1000_RCTL_SECRC;
3179
3180	/* disable store bad packets and clear size bits. */
3181	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
3182
3183	/* enable LPE to prevent packets larger than max_frame_size */
3184	rctl |= E1000_RCTL_LPE;
3185
3186	/* disable queue 0 to prevent tail write w/o re-config */
3187	wr32(E1000_RXDCTL(0), 0);
3188
3189	/* Attention!!!  For SR-IOV PF driver operations you must enable
3190	 * queue drop for all VF and PF queues to prevent head of line blocking
3191	 * if an un-trusted VF does not provide descriptors to hardware.
3192	 */
3193	if (adapter->vfs_allocated_count) {
3194		/* set all queue drop enable bits */
3195		wr32(E1000_QDE, ALL_QUEUES);
3196	}
3197
3198	/* This is useful for sniffing bad packets. */
3199	if (adapter->netdev->features & NETIF_F_RXALL) {
3200		/* UPE and MPE will be handled by normal PROMISC logic
3201	 * in igb_set_rx_mode */
3202		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3203			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3204			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3205
3206		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3207			  E1000_RCTL_DPF | /* Allow filtered pause */
3208			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3209		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3210		 * and that breaks VLANs.
3211		 */
3212	}
3213
3214	wr32(E1000_RCTL, rctl);
3215}
3216
3217static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3218                                   int vfn)
3219{
3220	struct e1000_hw *hw = &adapter->hw;
3221	u32 vmolr;
3222
3223	/* if it isn't the PF, check to see if VFs are enabled and
3224	 * increase the size to support VLAN tags */
3225	if (vfn < adapter->vfs_allocated_count &&
3226	    adapter->vf_data[vfn].vlans_enabled)
3227		size += VLAN_TAG_SIZE;
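	/* e.g. a 1522-byte limit becomes 1526 when the VF has VLANs enabled,
	 * leaving room for the 4-byte tag */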
3228
3229	vmolr = rd32(E1000_VMOLR(vfn));
3230	vmolr &= ~E1000_VMOLR_RLPML_MASK;
3231	vmolr |= size | E1000_VMOLR_LPE;
3232	wr32(E1000_VMOLR(vfn), vmolr);
3233
3234	return 0;
3235}
3236
3237/**
3238 * igb_rlpml_set - set maximum receive packet size
3239 * @adapter: board private structure
3240 *
3241 * Configure maximum receivable packet size.
3242 **/
3243static void igb_rlpml_set(struct igb_adapter *adapter)
3244{
3245	u32 max_frame_size = adapter->max_frame_size;
3246	struct e1000_hw *hw = &adapter->hw;
3247	u16 pf_id = adapter->vfs_allocated_count;
3248
3249	if (pf_id) {
3250		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
3251		/*
3252		 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3253		 * to our max jumbo frame size, in case we need to enable
3254		 * jumbo frames on one of the rings later.
3255		 * This will not pass over-length frames into the default
3256		 * queue because it's gated by the VMOLR.RLPML.
3257		 */
3258		max_frame_size = MAX_JUMBO_FRAME_SIZE;
3259	}
3260
3261	wr32(E1000_RLPML, max_frame_size);
3262}
3263
3264static inline void igb_set_vmolr(struct igb_adapter *adapter,
3265				 int vfn, bool aupe)
3266{
3267	struct e1000_hw *hw = &adapter->hw;
3268	u32 vmolr;
3269
3270	/*
3271	 * This register exists only on 82576 and newer, so if we are older
3272	 * we should exit and do nothing
3273	 */
3274	if (hw->mac.type < e1000_82576)
3275		return;
3276
3277	vmolr = rd32(E1000_VMOLR(vfn));
3278	vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
3279	if (aupe)
3280		vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
3281	else
3282		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
3283
3284	/* clear all bits that might not be set */
3285	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3286
3287	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
3288		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3289	/*
3290	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3291	 * multicast packets
3292	 */
3293	if (vfn <= adapter->vfs_allocated_count)
3294		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */
3295
3296	wr32(E1000_VMOLR(vfn), vmolr);
3297}
3298
3299/**
3300 * igb_configure_rx_ring - Configure a receive ring after Reset
3301 * @adapter: board private structure
3302 * @ring: receive ring to be configured
3303 *
3304 * Configure the Rx unit of the MAC after a reset.
3305 **/
3306void igb_configure_rx_ring(struct igb_adapter *adapter,
3307                           struct igb_ring *ring)
3308{
3309	struct e1000_hw *hw = &adapter->hw;
3310	u64 rdba = ring->dma;
3311	int reg_idx = ring->reg_idx;
3312	u32 srrctl = 0, rxdctl = 0;
3313
3314	/* disable the queue */
3315	wr32(E1000_RXDCTL(reg_idx), 0);
3316
3317	/* Set DMA base address registers */
3318	wr32(E1000_RDBAL(reg_idx),
3319	     rdba & 0x00000000ffffffffULL);
3320	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3321	wr32(E1000_RDLEN(reg_idx),
3322	               ring->count * sizeof(union e1000_adv_rx_desc));
3323
3324	/* initialize head and tail */
3325	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
3326	wr32(E1000_RDH(reg_idx), 0);
3327	writel(0, ring->tail);
3328
3329	/* set descriptor configuration */
3330	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3331	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3332	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3333	if (hw->mac.type >= e1000_82580)
3334		srrctl |= E1000_SRRCTL_TIMESTAMP;
3335	/* Only set Drop Enable if we are supporting multiple queues */
3336	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3337		srrctl |= E1000_SRRCTL_DROP_EN;
3338
3339	wr32(E1000_SRRCTL(reg_idx), srrctl);
3340
3341	/* set filtering for VMDQ pools */
3342	igb_set_vmolr(adapter, reg_idx & 0x7, true);
3343
3344	rxdctl |= IGB_RX_PTHRESH;
3345	rxdctl |= IGB_RX_HTHRESH << 8;
3346	rxdctl |= IGB_RX_WTHRESH << 16;
3347
3348	/* enable receive descriptor fetching */
3349	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3350	wr32(E1000_RXDCTL(reg_idx), rxdctl);
3351}
3352
3353/**
3354 * igb_configure_rx - Configure receive Unit after Reset
3355 * @adapter: board private structure
3356 *
3357 * Configure the Rx unit of the MAC after a reset.
3358 **/
3359static void igb_configure_rx(struct igb_adapter *adapter)
3360{
3361	int i;
3362
3363	/* set UTA to appropriate mode */
3364	igb_set_uta(adapter);
3365
3366	/* set the correct pool for the PF default MAC address in entry 0 */
3367	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3368	                 adapter->vfs_allocated_count);
3369
3370	/* Setup the HW Rx Head and Tail Descriptor Pointers and
3371	 * the Base and Length of the Rx Descriptor Ring */
3372	for (i = 0; i < adapter->num_rx_queues; i++)
3373		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
3374}
3375
3376/**
3377 * igb_free_tx_resources - Free Tx Resources per Queue
3378 * @tx_ring: Tx descriptor ring for a specific queue
3379 *
3380 * Free all transmit software resources
3381 **/
3382void igb_free_tx_resources(struct igb_ring *tx_ring)
3383{
3384	igb_clean_tx_ring(tx_ring);
3385
3386	vfree(tx_ring->tx_buffer_info);
3387	tx_ring->tx_buffer_info = NULL;
3388
3389	/* if not set, then don't free */
3390	if (!tx_ring->desc)
3391		return;
3392
3393	dma_free_coherent(tx_ring->dev, tx_ring->size,
3394			  tx_ring->desc, tx_ring->dma);
3395
3396	tx_ring->desc = NULL;
3397}
3398
3399/**
3400 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3401 * @adapter: board private structure
3402 *
3403 * Free all transmit software resources
3404 **/
3405static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3406{
3407	int i;
3408
3409	for (i = 0; i < adapter->num_tx_queues; i++)
3410		igb_free_tx_resources(adapter->tx_ring[i]);
3411}
3412
3413void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3414				    struct igb_tx_buffer *tx_buffer)
3415{
3416	if (tx_buffer->skb) {
3417		dev_kfree_skb_any(tx_buffer->skb);
3418		if (dma_unmap_len(tx_buffer, len))
3419			dma_unmap_single(ring->dev,
3420					 dma_unmap_addr(tx_buffer, dma),
3421					 dma_unmap_len(tx_buffer, len),
3422					 DMA_TO_DEVICE);
3423	} else if (dma_unmap_len(tx_buffer, len)) {
3424		dma_unmap_page(ring->dev,
3425			       dma_unmap_addr(tx_buffer, dma),
3426			       dma_unmap_len(tx_buffer, len),
3427			       DMA_TO_DEVICE);
3428	}
3429	tx_buffer->next_to_watch = NULL;
3430	tx_buffer->skb = NULL;
3431	dma_unmap_len_set(tx_buffer, len, 0);
3432	/* buffer_info must be completely set up in the transmit path */
3433}
3434
3435/**
3436 * igb_clean_tx_ring - Free Tx Buffers
3437 * @tx_ring: ring to be cleaned
3438 **/
3439static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3440{
3441	struct igb_tx_buffer *buffer_info;
3442	unsigned long size;
3443	u16 i;
3444
3445	if (!tx_ring->tx_buffer_info)
3446		return;
3447	/* Free all the Tx ring sk_buffs */
3448
3449	for (i = 0; i < tx_ring->count; i++) {
3450		buffer_info = &tx_ring->tx_buffer_info[i];
3451		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3452	}
3453
3454	netdev_tx_reset_queue(txring_txq(tx_ring));
3455
3456	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3457	memset(tx_ring->tx_buffer_info, 0, size);
3458
3459	/* Zero out the descriptor ring */
3460	memset(tx_ring->desc, 0, tx_ring->size);
3461
3462	tx_ring->next_to_use = 0;
3463	tx_ring->next_to_clean = 0;
3464}
3465
3466/**
3467 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3468 * @adapter: board private structure
3469 **/
3470static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3471{
3472	int i;
3473
3474	for (i = 0; i < adapter->num_tx_queues; i++)
3475		igb_clean_tx_ring(adapter->tx_ring[i]);
3476}
3477
3478/**
3479 * igb_free_rx_resources - Free Rx Resources
3480 * @rx_ring: ring to clean the resources from
3481 *
3482 * Free all receive software resources
3483 **/
3484void igb_free_rx_resources(struct igb_ring *rx_ring)
3485{
3486	igb_clean_rx_ring(rx_ring);
3487
3488	vfree(rx_ring->rx_buffer_info);
3489	rx_ring->rx_buffer_info = NULL;
3490
3491	/* if not set, then don't free */
3492	if (!rx_ring->desc)
3493		return;
3494
3495	dma_free_coherent(rx_ring->dev, rx_ring->size,
3496			  rx_ring->desc, rx_ring->dma);
3497
3498	rx_ring->desc = NULL;
3499}
3500
3501/**
3502 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3503 * @adapter: board private structure
3504 *
3505 * Free all receive software resources
3506 **/
3507static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3508{
3509	int i;
3510
3511	for (i = 0; i < adapter->num_rx_queues; i++)
3512		igb_free_rx_resources(adapter->rx_ring[i]);
3513}
3514
3515/**
3516 * igb_clean_rx_ring - Free Rx Buffers per Queue
3517 * @rx_ring: ring to free buffers from
3518 **/
3519static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3520{
3521	unsigned long size;
3522	u16 i;
3523
3524	if (rx_ring->skb)
3525		dev_kfree_skb(rx_ring->skb);
3526	rx_ring->skb = NULL;
3527
3528	if (!rx_ring->rx_buffer_info)
3529		return;
3530
3531	/* Free all the Rx ring sk_buffs */
3532	for (i = 0; i < rx_ring->count; i++) {
3533		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
3534
3535		if (!buffer_info->page)
3536			continue;
3537
3538		dma_unmap_page(rx_ring->dev,
3539			       buffer_info->dma,
3540			       PAGE_SIZE,
3541			       DMA_FROM_DEVICE);
3542		__free_page(buffer_info->page);
3543
3544		buffer_info->page = NULL;
3545	}
3546
3547	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3548	memset(rx_ring->rx_buffer_info, 0, size);
3549
3550	/* Zero out the descriptor ring */
3551	memset(rx_ring->desc, 0, rx_ring->size);
3552
3553	rx_ring->next_to_alloc = 0;
3554	rx_ring->next_to_clean = 0;
3555	rx_ring->next_to_use = 0;
3556}
3557
3558/**
3559 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3560 * @adapter: board private structure
3561 **/
3562static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3563{
3564	int i;
3565
3566	for (i = 0; i < adapter->num_rx_queues; i++)
3567		igb_clean_rx_ring(adapter->rx_ring[i]);
3568}
3569
3570/**
3571 * igb_set_mac - Change the Ethernet Address of the NIC
3572 * @netdev: network interface device structure
3573 * @p: pointer to an address structure
3574 *
3575 * Returns 0 on success, negative on failure
3576 **/
3577static int igb_set_mac(struct net_device *netdev, void *p)
3578{
3579	struct igb_adapter *adapter = netdev_priv(netdev);
3580	struct e1000_hw *hw = &adapter->hw;
3581	struct sockaddr *addr = p;
3582
3583	if (!is_valid_ether_addr(addr->sa_data))
3584		return -EADDRNOTAVAIL;
3585
3586	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3587	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3588
3589	/* set the correct pool for the new PF MAC address in entry 0 */
3590	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3591	                 adapter->vfs_allocated_count);
3592
3593	return 0;
3594}
3595
3596/**
3597 * igb_write_mc_addr_list - write multicast addresses to MTA
3598 * @netdev: network interface device structure
3599 *
3600 * Writes multicast address list to the MTA hash table.
3601 * Returns: -ENOMEM on failure
3602 *                0 on no addresses written
3603 *                X on writing X addresses to MTA
3604 **/
3605static int igb_write_mc_addr_list(struct net_device *netdev)
3606{
3607	struct igb_adapter *adapter = netdev_priv(netdev);
3608	struct e1000_hw *hw = &adapter->hw;
3609	struct netdev_hw_addr *ha;
3610	u8  *mta_list;
3611	int i;
3612
3613	if (netdev_mc_empty(netdev)) {
3614		/* nothing to program, so clear mc list */
3615		igb_update_mc_addr_list(hw, NULL, 0);
3616		igb_restore_vf_multicasts(adapter);
3617		return 0;
3618	}
3619
3620	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3621	if (!mta_list)
3622		return -ENOMEM;
3623
3624	/* The shared function expects a packed array of only addresses. */
3625	i = 0;
3626	netdev_for_each_mc_addr(ha, netdev)
3627		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3628
3629	igb_update_mc_addr_list(hw, mta_list, i);
3630	kfree(mta_list);
3631
3632	return netdev_mc_count(netdev);
3633}
3634
3635/**
3636 * igb_write_uc_addr_list - write unicast addresses to RAR table
3637 * @netdev: network interface device structure
3638 *
3639 * Writes unicast address list to the RAR table.
3640 * Returns: -ENOMEM on failure/insufficient address space
3641 *          0 on no addresses written
3642 *          X on writing X addresses to the RAR table
3643 **/
3644static int igb_write_uc_addr_list(struct net_device *netdev)
3645{
3646	struct igb_adapter *adapter = netdev_priv(netdev);
3647	struct e1000_hw *hw = &adapter->hw;
3648	unsigned int vfn = adapter->vfs_allocated_count;
3649	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3650	int count = 0;
3651
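	/* RAR layout assumed by this math: entry 0 holds the PF MAC, the
	 * top vfn entries are reserved for VF MACs, and the remainder is
	 * available for secondary unicast filtering.
	 */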
3652	/* return -ENOMEM if there is insufficient RAR space for the addresses */
3653	if (netdev_uc_count(netdev) > rar_entries)
3654		return -ENOMEM;
3655
3656	if (!netdev_uc_empty(netdev) && rar_entries) {
3657		struct netdev_hw_addr *ha;
3658
3659		netdev_for_each_uc_addr(ha, netdev) {
3660			if (!rar_entries)
3661				break;
3662			igb_rar_set_qsel(adapter, ha->addr,
3663			                 rar_entries--,
3664			                 vfn);
3665			count++;
3666		}
3667	}
3668	/* zero out the remaining unused RAR entries; writes are done in
	 * reverse order to avoid write combining */
3669	for (; rar_entries > 0 ; rar_entries--) {
3670		wr32(E1000_RAH(rar_entries), 0);
3671		wr32(E1000_RAL(rar_entries), 0);
3672	}
3673	wrfl();
3674
3675	return count;
3676}
3677
3678/**
3679 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3680 * @netdev: network interface device structure
3681 *
3682 * The set_rx_mode entry point is called whenever the unicast or multicast
3683 * address lists or the network interface flags are updated.  This routine is
3684 * responsible for configuring the hardware for proper unicast, multicast,
3685 * promiscuous mode, and all-multi behavior.
3686 **/
3687static void igb_set_rx_mode(struct net_device *netdev)
3688{
3689	struct igb_adapter *adapter = netdev_priv(netdev);
3690	struct e1000_hw *hw = &adapter->hw;
3691	unsigned int vfn = adapter->vfs_allocated_count;
3692	u32 rctl, vmolr = 0;
3693	int count;
3694
3695	/* Check for Promiscuous and All Multicast modes */
3696	rctl = rd32(E1000_RCTL);
3697
3698	/* clear the affected bits */
3699	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3700
3701	if (netdev->flags & IFF_PROMISC) {
3702		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3703		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
3704	} else {
3705		if (netdev->flags & IFF_ALLMULTI) {
3706			rctl |= E1000_RCTL_MPE;
3707			vmolr |= E1000_VMOLR_MPME;
3708		} else {
3709			/*
3710			 * Write addresses to the MTA, if the attempt fails
3711			 * then we should just turn on promiscuous mode so
3712			 * that we can at least receive multicast traffic
3713			 */
3714			count = igb_write_mc_addr_list(netdev);
3715			if (count < 0) {
3716				rctl |= E1000_RCTL_MPE;
3717				vmolr |= E1000_VMOLR_MPME;
3718			} else if (count) {
3719				vmolr |= E1000_VMOLR_ROMPE;
3720			}
3721		}
3722		/*
3723		 * Write addresses to available RAR registers, if there is not
3724		 * sufficient space to store all the addresses then enable
3725		 * unicast promiscuous mode
3726		 */
3727		count = igb_write_uc_addr_list(netdev);
3728		if (count < 0) {
3729			rctl |= E1000_RCTL_UPE;
3730			vmolr |= E1000_VMOLR_ROPE;
3731		}
3732		rctl |= E1000_RCTL_VFE;
3733	}
3734	wr32(E1000_RCTL, rctl);
3735
3736	/*
3737	 * In order to support SR-IOV and eventually VMDq it is necessary to set
3738	 * the VMOLR to enable the appropriate modes.  Without this workaround
3739	 * we will have issues with VLAN tag stripping not being done for frames
3740	 * that are only arriving because we are the default pool
3741	 */
3742	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
3743		return;
3744
3745	vmolr |= rd32(E1000_VMOLR(vfn)) &
3746	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3747	wr32(E1000_VMOLR(vfn), vmolr);
3748	igb_restore_vf_multicasts(adapter);
3749}
3750
3751static void igb_check_wvbr(struct igb_adapter *adapter)
3752{
3753	struct e1000_hw *hw = &adapter->hw;
3754	u32 wvbr = 0;
3755
3756	switch (hw->mac.type) {
3757	case e1000_82576:
3758	case e1000_i350:
3759		wvbr = rd32(E1000_WVBR);
3760		if (!wvbr)
			return;
3761		break;
3762	default:
3763		break;
3764	}
3765
3766	adapter->wvbr |= wvbr;
3767}
3768
3769#define IGB_STAGGERED_QUEUE_OFFSET 8
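/* A spoof event for VF "j" can be flagged in either bit j or bit
 * j + IGB_STAGGERED_QUEUE_OFFSET of WVBR, apparently because a VF's two
 * queue sets are staggered by 8 on 82576/i350, so igb_spoof_check()
 * tests and clears both bits.
 */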
3770
3771static void igb_spoof_check(struct igb_adapter *adapter)
3772{
3773	int j;
3774
3775	if (!adapter->wvbr)
3776		return;
3777
3778	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3779		if (adapter->wvbr & (1 << j) ||
3780		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3781			dev_warn(&adapter->pdev->dev,
3782				"Spoof event(s) detected on VF %d\n", j);
3783			adapter->wvbr &=
3784				~((1 << j) |
3785				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3786		}
3787	}
3788}
3789
3790/* Need to wait a few seconds after link up to get diagnostic information from
3791 * the phy */
3792static void igb_update_phy_info(unsigned long data)
3793{
3794	struct igb_adapter *adapter = (struct igb_adapter *) data;
3795	igb_get_phy_info(&adapter->hw);
3796}
3797
3798/**
3799 * igb_has_link - check shared code for link and determine up/down
3800 * @adapter: pointer to driver private info
3801 **/
3802bool igb_has_link(struct igb_adapter *adapter)
3803{
3804	struct e1000_hw *hw = &adapter->hw;
3805	bool link_active = false;
3806	s32 ret_val = 0;
3807
3808	/* get_link_status is set on LSC (link status) interrupt or
3809	 * rx sequence error interrupt.  It stays set until
3810	 * e1000_check_for_link establishes link, and this applies
3811	 * to copper adapters ONLY
3812	 */
3813	switch (hw->phy.media_type) {
3814	case e1000_media_type_copper:
3815		if (hw->mac.get_link_status) {
3816			ret_val = hw->mac.ops.check_for_link(hw);
3817			link_active = !hw->mac.get_link_status;
3818		} else {
3819			link_active = true;
3820		}
3821		break;
3822	case e1000_media_type_internal_serdes:
3823		ret_val = hw->mac.ops.check_for_link(hw);
3824		link_active = hw->mac.serdes_has_link;
3825		break;
3826	default:
3827	case e1000_media_type_unknown:
3828		break;
3829	}
3830
3831	return link_active;
3832}
3833
3834static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3835{
3836	bool ret = false;
3837	u32 ctrl_ext, thstat;
3838
3839	/* check for thermal sensor event on i350 copper only */
3840	if (hw->mac.type == e1000_i350) {
3841		thstat = rd32(E1000_THSTAT);
3842		ctrl_ext = rd32(E1000_CTRL_EXT);
3843
3844		if ((hw->phy.media_type == e1000_media_type_copper) &&
3845		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3846			ret = !!(thstat & event);
3847		}
3848	}
3849
3850	return ret;
3851}
3852
3853/**
3854 * igb_watchdog - Timer Call-back
3855 * @data: pointer to adapter cast into an unsigned long
3856 **/
3857static void igb_watchdog(unsigned long data)
3858{
3859	struct igb_adapter *adapter = (struct igb_adapter *)data;
3860	/* Do the rest outside of interrupt context */
3861	schedule_work(&adapter->watchdog_task);
3862}
3863
3864static void igb_watchdog_task(struct work_struct *work)
3865{
3866	struct igb_adapter *adapter = container_of(work,
3867						   struct igb_adapter,
3868						   watchdog_task);
3869	struct e1000_hw *hw = &adapter->hw;
3870	struct net_device *netdev = adapter->netdev;
3871	u32 link;
3872	int i;
3873
3874	link = igb_has_link(adapter);
3875	if (link) {
3876		/* Cancel scheduled suspend requests. */
3877		pm_runtime_resume(netdev->dev.parent);
3878
3879		if (!netif_carrier_ok(netdev)) {
3880			u32 ctrl;
3881			hw->mac.ops.get_speed_and_duplex(hw,
3882			                                 &adapter->link_speed,
3883			                                 &adapter->link_duplex);
3884
3885			ctrl = rd32(E1000_CTRL);
3886			/* Link status message must follow this format */
3887			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
3889			       netdev->name,
3890			       adapter->link_speed,
3891			       adapter->link_duplex == FULL_DUPLEX ?
3892			       "Full" : "Half",
3893			       (ctrl & E1000_CTRL_TFCE) &&
3894			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3895			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
3896			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
3897
3898			/* check for thermal sensor event */
3899			if (igb_thermal_sensor_event(hw,
3900			    E1000_THSTAT_LINK_THROTTLE)) {
3901				netdev_info(netdev,
3902					    "The network adapter link speed was downshifted because it overheated\n");
3904			}
3905
3906			/* adjust timeout factor according to speed/duplex */
3907			adapter->tx_timeout_factor = 1;
3908			switch (adapter->link_speed) {
3909			case SPEED_10:
3910				adapter->tx_timeout_factor = 14;
3911				break;
3912			case SPEED_100:
3913				/* maybe add some timeout factor ? */
3914				break;
3915			}
3916
3917			netif_carrier_on(netdev);
3918
3919			igb_ping_all_vfs(adapter);
3920			igb_check_vf_rate_limit(adapter);
3921
3922			/* link state has changed, schedule phy info update */
3923			if (!test_bit(__IGB_DOWN, &adapter->state))
3924				mod_timer(&adapter->phy_info_timer,
3925					  round_jiffies(jiffies + 2 * HZ));
3926		}
3927	} else {
3928		if (netif_carrier_ok(netdev)) {
3929			adapter->link_speed = 0;
3930			adapter->link_duplex = 0;
3931
3932			/* check for thermal sensor event */
3933			if (igb_thermal_sensor_event(hw,
3934			    E1000_THSTAT_PWR_DOWN)) {
3935				netdev_err(netdev,
3936					   "The network adapter was stopped because it overheated\n");
3937			}
3938
3939			/* Link status message must follow this format */
3940			printk(KERN_INFO "igb: %s NIC Link is Down\n",
3941			       netdev->name);
3942			netif_carrier_off(netdev);
3943
3944			igb_ping_all_vfs(adapter);
3945
3946			/* link state has changed, schedule phy info update */
3947			if (!test_bit(__IGB_DOWN, &adapter->state))
3948				mod_timer(&adapter->phy_info_timer,
3949					  round_jiffies(jiffies + 2 * HZ));
3950
3951			pm_schedule_suspend(netdev->dev.parent,
3952					    MSEC_PER_SEC * 5);
3953		}
3954	}
3955
3956	spin_lock(&adapter->stats64_lock);
3957	igb_update_stats(adapter, &adapter->stats64);
3958	spin_unlock(&adapter->stats64_lock);
3959
3960	for (i = 0; i < adapter->num_tx_queues; i++) {
3961		struct igb_ring *tx_ring = adapter->tx_ring[i];
3962		if (!netif_carrier_ok(netdev)) {
3963			/* We've lost link, so the controller stops DMA,
3964			 * but we've got queued Tx work that's never going
3965			 * to get done, so reset controller to flush Tx.
3966			 * (Do the reset outside of interrupt context). */
3967			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3968				adapter->tx_timeout_count++;
3969				schedule_work(&adapter->reset_task);
3970				/* return immediately since reset is imminent */
3971				return;
3972			}
3973		}
3974
3975		/* Force detection of hung controller every watchdog period */
3976		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3977	}
3978
3979	/* Cause software interrupt to ensure rx ring is cleaned */
3980	if (adapter->msix_entries) {
3981		u32 eics = 0;
3982		for (i = 0; i < adapter->num_q_vectors; i++)
3983			eics |= adapter->q_vector[i]->eims_value;
3984		wr32(E1000_EICS, eics);
3985	} else {
3986		wr32(E1000_ICS, E1000_ICS_RXDMT0);
3987	}
3988
3989	igb_spoof_check(adapter);
3990	igb_ptp_rx_hang(adapter);
3991
3992	/* Reset the timer */
3993	if (!test_bit(__IGB_DOWN, &adapter->state))
3994		mod_timer(&adapter->watchdog_timer,
3995			  round_jiffies(jiffies + 2 * HZ));
3996}
3997
3998enum latency_range {
3999	lowest_latency = 0,
4000	low_latency = 1,
4001	bulk_latency = 2,
4002	latency_invalid = 255
4003};
4004
4005/**
4006 * igb_update_ring_itr - update the dynamic ITR value based on packet size
4007 *
4008 *      Stores a new ITR value based strictly on packet size.  This
4009 *      algorithm is less sophisticated than that used in igb_update_itr,
4010 *      due to the difficulty of synchronizing statistics across multiple
4011 *      receive rings.  The divisors and thresholds used by this function
4012 *      were determined based on theoretical maximum wire speed and testing
4013 *      data, in order to minimize response time while increasing bulk
4014 *      throughput.
4015 *      This functionality is controlled by the InterruptThrottleRate module
4016 *      parameter (see igb_param.c)
4017 *      NOTE:  This function is called only when operating in a multiqueue
4018 *             receive environment.
4019 * @q_vector: pointer to q_vector
4020 **/
4021static void igb_update_ring_itr(struct igb_q_vector *q_vector)
4022{
4023	int new_val = q_vector->itr_val;
4024	int avg_wire_size = 0;
4025	struct igb_adapter *adapter = q_vector->adapter;
4026	unsigned int packets;
4027
4028	/* For non-gigabit speeds, just fix the interrupt rate at 4000
4029	 * ints/sec (IGB_4K_ITR).
4030	 */
4031	if (adapter->link_speed != SPEED_1000) {
4032		new_val = IGB_4K_ITR;
4033		goto set_itr_val;
4034	}
4035
4036	packets = q_vector->rx.total_packets;
4037	if (packets)
4038		avg_wire_size = q_vector->rx.total_bytes / packets;
4039
4040	packets = q_vector->tx.total_packets;
4041	if (packets)
4042		avg_wire_size = max_t(u32, avg_wire_size,
4043				      q_vector->tx.total_bytes / packets);
4044
4045	/* if avg_wire_size isn't set no work was done */
4046	if (!avg_wire_size)
4047		goto clear_counts;
4048
4049	/* Add 24 bytes to size to account for CRC, preamble, and gap */
4050	avg_wire_size += 24;
4051
4052	/* Don't starve jumbo frames */
4053	avg_wire_size = min(avg_wire_size, 3000);
4054
4055	/* Give a little boost to mid-size frames */
4056	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
4057		new_val = avg_wire_size / 3;
4058	else
4059		new_val = avg_wire_size / 2;
4060
4061	/* conservative mode (itr 3) eliminates the lowest_latency setting */
4062	if (new_val < IGB_20K_ITR &&
4063	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4064	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4065		new_val = IGB_20K_ITR;
4066
4067set_itr_val:
4068	if (new_val != q_vector->itr_val) {
4069		q_vector->itr_val = new_val;
4070		q_vector->set_itr = 1;
4071	}
4072clear_counts:
4073	q_vector->rx.total_bytes = 0;
4074	q_vector->rx.total_packets = 0;
4075	q_vector->tx.total_bytes = 0;
4076	q_vector->tx.total_packets = 0;
4077}
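/* Worked example, assuming the igb.h values IGB_4K_ITR = 980,
 * IGB_20K_ITR = 196 and IGB_70K_ITR = 56 (so rate ~= 3.92M / itr_val):
 * 1500-byte frames give avg_wire_size = 1500 + 24 = 1524, outside the
 * 300-1200 mid-size window, so new_val = 1524 / 2 = 762, which is
 * roughly 5100 ints/sec.
 */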
4078
4079/**
4080 * igb_update_itr - update the dynamic ITR value based on statistics
4081 *      Stores a new ITR value based on packets and byte
4082 *      counts during the last interrupt.  The advantage of per interrupt
4083 *      computation is faster updates and more accurate ITR for the current
4084 *      traffic pattern.  Constants in this function were computed
4085 *      based on theoretical maximum wire speed and thresholds were set based
4086 *      on testing data as well as attempting to minimize response time
4087 *      while increasing bulk throughput.
4088 *      This functionality is controlled by the InterruptThrottleRate module
4089 *      parameter (see igb_param.c)
4090 *      NOTE:  These calculations are only valid when operating in a single-
4091 *             queue environment.
4092 * @q_vector: pointer to q_vector
4093 * @ring_container: ring info to update the itr for
4094 **/
4095static void igb_update_itr(struct igb_q_vector *q_vector,
4096			   struct igb_ring_container *ring_container)
4097{
4098	unsigned int packets = ring_container->total_packets;
4099	unsigned int bytes = ring_container->total_bytes;
4100	u8 itrval = ring_container->itr;
4101
4102	/* no packets, exit with status unchanged */
4103	if (packets == 0)
4104		return;
4105
4106	switch (itrval) {
4107	case lowest_latency:
4108		/* handle TSO and jumbo frames */
4109		if (bytes/packets > 8000)
4110			itrval = bulk_latency;
4111		else if ((packets < 5) && (bytes > 512))
4112			itrval = low_latency;
4113		break;
4114	case low_latency:  /* 50 usec aka 20000 ints/s */
4115		if (bytes > 10000) {
4116			/* this if handles the TSO accounting */
4117			if (bytes/packets > 8000) {
4118				itrval = bulk_latency;
4119			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
4120				itrval = bulk_latency;
4121			} else if (packets > 35) {
4122				itrval = lowest_latency;
4123			}
4124		} else if (bytes/packets > 2000) {
4125			itrval = bulk_latency;
4126		} else if (packets <= 2 && bytes < 512) {
4127			itrval = lowest_latency;
4128		}
4129		break;
4130	case bulk_latency: /* 250 usec aka 4000 ints/s */
4131		if (bytes > 25000) {
4132			if (packets > 35)
4133				itrval = low_latency;
4134		} else if (bytes < 1500) {
4135			itrval = low_latency;
4136		}
4137		break;
4138	}
4139
4140	/* clear work counters since we have the values we need */
4141	ring_container->total_bytes = 0;
4142	ring_container->total_packets = 0;
4143
4144	/* write updated itr to ring container */
4145	ring_container->itr = itrval;
4146}
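/* Worked example: an interrupt window that retired 20 packets totalling
 * 30000 bytes while in low_latency takes the bytes > 10000 branch;
 * 30000 / 20 = 1500 > 1200, so the container steps down to bulk_latency.
 */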
4147
4148static void igb_set_itr(struct igb_q_vector *q_vector)
4149{
4150	struct igb_adapter *adapter = q_vector->adapter;
4151	u32 new_itr = q_vector->itr_val;
4152	u8 current_itr = 0;
4153
4154	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4155	if (adapter->link_speed != SPEED_1000) {
4156		current_itr = 0;
4157		new_itr = IGB_4K_ITR;
4158		goto set_itr_now;
4159	}
4160
4161	igb_update_itr(q_vector, &q_vector->tx);
4162	igb_update_itr(q_vector, &q_vector->rx);
4163
4164	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4165
4166	/* conservative mode (itr 3) eliminates the lowest_latency setting */
4167	if (current_itr == lowest_latency &&
4168	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4169	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4170		current_itr = low_latency;
4171
4172	switch (current_itr) {
4173	/* counts and packets in update_itr are dependent on these numbers */
4174	case lowest_latency:
4175		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
4176		break;
4177	case low_latency:
4178		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
4179		break;
4180	case bulk_latency:
4181		new_itr = IGB_4K_ITR;  /* 4,000 ints/sec */
4182		break;
4183	default:
4184		break;
4185	}
4186
4187set_itr_now:
4188	if (new_itr != q_vector->itr_val) {
4189		/* this attempts to bias the interrupt rate towards Bulk
4190		 * by adding intermediate steps when interrupt rate is
4191		 * increasing */
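		/* Note: whenever new_itr > itr_val, the quotient below is
		 * strictly less than new_itr, so this max() appears to
		 * resolve to new_itr and the intermediate step is skipped.
		 */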
4192		new_itr = new_itr > q_vector->itr_val ?
4193		             max((new_itr * q_vector->itr_val) /
4194		                 (new_itr + (q_vector->itr_val >> 2)),
4195				 new_itr) :
4196			     new_itr;
4197		/* Don't write the value here; it resets the adapter's
4198		 * internal timer, and causes us to delay far longer than
4199		 * we should between interrupts.  Instead, we write the ITR
4200		 * value at the beginning of the next interrupt so the timing
4201		 * ends up being correct.
4202		 */
4203		q_vector->itr_val = new_itr;
4204		q_vector->set_itr = 1;
4205	}
4206}
4207
4208static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4209			    u32 type_tucmd, u32 mss_l4len_idx)
4210{
4211	struct e1000_adv_tx_context_desc *context_desc;
4212	u16 i = tx_ring->next_to_use;
4213
4214	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4215
4216	i++;
4217	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4218
4219	/* set bits to identify this as an advanced context descriptor */
4220	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4221
4222	/* For 82575, context index must be unique per ring. */
4223	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
4224		mss_l4len_idx |= tx_ring->reg_idx << 4;
4225
4226	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
4227	context_desc->seqnum_seed	= 0;
4228	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
4229	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
4230}
4231
4232static int igb_tso(struct igb_ring *tx_ring,
4233		   struct igb_tx_buffer *first,
4234		   u8 *hdr_len)
4235{
4236	struct sk_buff *skb = first->skb;
4237	u32 vlan_macip_lens, type_tucmd;
4238	u32 mss_l4len_idx, l4len;
4239
4240	if (skb->ip_summed != CHECKSUM_PARTIAL)
4241		return 0;
4242
4243	if (!skb_is_gso(skb))
4244		return 0;
4245
4246	if (skb_header_cloned(skb)) {
4247		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4248		if (err)
4249			return err;
4250	}
4251
4252	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4253	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
4254
4255	if (first->protocol == __constant_htons(ETH_P_IP)) {
4256		struct iphdr *iph = ip_hdr(skb);
4257		iph->tot_len = 0;
4258		iph->check = 0;
4259		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4260							 iph->daddr, 0,
4261							 IPPROTO_TCP,
4262							 0);
4263		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4264		first->tx_flags |= IGB_TX_FLAGS_TSO |
4265				   IGB_TX_FLAGS_CSUM |
4266				   IGB_TX_FLAGS_IPV4;
4267	} else if (skb_is_gso_v6(skb)) {
4268		ipv6_hdr(skb)->payload_len = 0;
4269		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4270						       &ipv6_hdr(skb)->daddr,
4271						       0, IPPROTO_TCP, 0);
4272		first->tx_flags |= IGB_TX_FLAGS_TSO |
4273				   IGB_TX_FLAGS_CSUM;
4274	}
4275
4276	/* compute header lengths */
4277	l4len = tcp_hdrlen(skb);
4278	*hdr_len = skb_transport_offset(skb) + l4len;
4279
4280	/* update gso size and bytecount with header size */
4281	first->gso_segs = skb_shinfo(skb)->gso_segs;
4282	first->bytecount += (first->gso_segs - 1) * *hdr_len;
4283
4284	/* MSS L4LEN IDX */
4285	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4286	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
4287
4288	/* VLAN MACLEN IPLEN */
4289	vlan_macip_lens = skb_network_header_len(skb);
4290	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
4291	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
4292
4293	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
4294
4295	return 1;
4296}
4297
4298static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4299{
4300	struct sk_buff *skb = first->skb;
4301	u32 vlan_macip_lens = 0;
4302	u32 mss_l4len_idx = 0;
4303	u32 type_tucmd = 0;
4304
4305	if (skb->ip_summed != CHECKSUM_PARTIAL) {
4306		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4307			return;
4308	} else {
4309		u8 l4_hdr = 0;
4310		switch (first->protocol) {
4311		case __constant_htons(ETH_P_IP):
4312			vlan_macip_lens |= skb_network_header_len(skb);
4313			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4314			l4_hdr = ip_hdr(skb)->protocol;
4315			break;
4316		case __constant_htons(ETH_P_IPV6):
4317			vlan_macip_lens |= skb_network_header_len(skb);
4318			l4_hdr = ipv6_hdr(skb)->nexthdr;
4319			break;
4320		default:
4321			if (unlikely(net_ratelimit())) {
4322				dev_warn(tx_ring->dev,
4323					 "partial checksum but proto=%x!\n",
4324					 first->protocol);
4325			}
4326			break;
4327		}
4328
4329		switch (l4_hdr) {
4330		case IPPROTO_TCP:
4331			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4332			mss_l4len_idx = tcp_hdrlen(skb) <<
4333					E1000_ADVTXD_L4LEN_SHIFT;
4334			break;
4335		case IPPROTO_SCTP:
4336			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4337			mss_l4len_idx = sizeof(struct sctphdr) <<
4338					E1000_ADVTXD_L4LEN_SHIFT;
4339			break;
4340		case IPPROTO_UDP:
4341			mss_l4len_idx = sizeof(struct udphdr) <<
4342					E1000_ADVTXD_L4LEN_SHIFT;
4343			break;
4344		default:
4345			if (unlikely(net_ratelimit())) {
4346				dev_warn(tx_ring->dev,
4347					 "partial checksum but l4 proto=%x!\n",
4348					 l4_hdr);
4349			}
4350			break;
4351		}
4352
4353		/* update TX checksum flag */
4354		first->tx_flags |= IGB_TX_FLAGS_CSUM;
4355	}
4356
4357	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
4358	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
4359
4360	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
4361}
4362
4363#define IGB_SET_FLAG(_input, _flag, _result) \
4364	((_flag <= _result) ? \
4365	 ((u32)(_input & _flag) * (_result / _flag)) : \
4366	 ((u32)(_input & _flag) / (_flag / _result)))
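/* Example: IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, E1000_ADVTXD_DCMD_VLE)
 * masks the single VLAN flag bit out of tx_flags and rescales it by the
 * constant ratio between the two bit masks, translating the software flag
 * into the hardware VLE bit without a conditional branch.
 */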
4367
4368static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
4369{
4370	/* set type for advanced descriptor with frame checksum insertion */
4371	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
4372		       E1000_ADVTXD_DCMD_DEXT |
4373		       E1000_ADVTXD_DCMD_IFCS;
4374
4375	/* set HW vlan bit if vlan is present */
4376	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
4377				 (E1000_ADVTXD_DCMD_VLE));
4378
4379	/* set segmentation bits for TSO */
4380	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
4381				 (E1000_ADVTXD_DCMD_TSE));
4382
4383	/* set timestamp bit if present */
4384	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
4385				 (E1000_ADVTXD_MAC_TSTAMP));
4386
4387	/* drop frame checksum insertion (IFCS) when skb->no_fcs is set */
4388	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
4389
4390	return cmd_type;
4391}
4392
4393static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4394				 union e1000_adv_tx_desc *tx_desc,
4395				 u32 tx_flags, unsigned int paylen)
4396{
4397	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4398
4399	/* 82575 requires a unique index per ring */
4400	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
4401		olinfo_status |= tx_ring->reg_idx << 4;
4402
4403	/* insert L4 checksum */
4404	olinfo_status |= IGB_SET_FLAG(tx_flags,
4405				      IGB_TX_FLAGS_CSUM,
4406				      (E1000_TXD_POPTS_TXSM << 8));
4407
4408	/* insert IPv4 checksum */
4409	olinfo_status |= IGB_SET_FLAG(tx_flags,
4410				      IGB_TX_FLAGS_IPV4,
4411				      (E1000_TXD_POPTS_IXSM << 8));
4412
4413	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
4414}
4415
4416static void igb_tx_map(struct igb_ring *tx_ring,
4417		       struct igb_tx_buffer *first,
4418		       const u8 hdr_len)
4419{
4420	struct sk_buff *skb = first->skb;
4421	struct igb_tx_buffer *tx_buffer;
4422	union e1000_adv_tx_desc *tx_desc;
4423	struct skb_frag_struct *frag;
4424	dma_addr_t dma;
4425	unsigned int data_len, size;
4426	u32 tx_flags = first->tx_flags;
4427	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
4428	u16 i = tx_ring->next_to_use;
4429
4430	tx_desc = IGB_TX_DESC(tx_ring, i);
4431
4432	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
4433
4434	size = skb_headlen(skb);
4435	data_len = skb->data_len;
4436
4437	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4438
4439	tx_buffer = first;
4440
4441	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
4442		if (dma_mapping_error(tx_ring->dev, dma))
4443			goto dma_error;
4444
4445		/* record length, and DMA address */
4446		dma_unmap_len_set(tx_buffer, len, size);
4447		dma_unmap_addr_set(tx_buffer, dma, dma);
4448
4449		tx_desc->read.buffer_addr = cpu_to_le64(dma);
4450
4451		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4452			tx_desc->read.cmd_type_len =
4453				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
4454
4455			i++;
4456			tx_desc++;
4457			if (i == tx_ring->count) {
4458				tx_desc = IGB_TX_DESC(tx_ring, 0);
4459				i = 0;
4460			}
4461			tx_desc->read.olinfo_status = 0;
4462
4463			dma += IGB_MAX_DATA_PER_TXD;
4464			size -= IGB_MAX_DATA_PER_TXD;
4465
4466			tx_desc->read.buffer_addr = cpu_to_le64(dma);
4467		}
4468
4469		if (likely(!data_len))
4470			break;
4471
4472		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
4473
4474		i++;
4475		tx_desc++;
4476		if (i == tx_ring->count) {
4477			tx_desc = IGB_TX_DESC(tx_ring, 0);
4478			i = 0;
4479		}
4480		tx_desc->read.olinfo_status = 0;
4481
4482		size = skb_frag_size(frag);
4483		data_len -= size;
4484
4485		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4486				       size, DMA_TO_DEVICE);
4487
4488		tx_buffer = &tx_ring->tx_buffer_info[i];
4489	}
4490
4491	/* write last descriptor with RS and EOP bits */
4492	cmd_type |= size | IGB_TXD_DCMD;
4493	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
4494
4495	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4496
4497	/* set the timestamp */
4498	first->time_stamp = jiffies;
4499
4500	/*
4501	 * Force memory writes to complete before letting h/w know there
4502	 * are new descriptors to fetch.  (Only applicable for weak-ordered
4503	 * memory model archs, such as IA-64).
4504	 *
4505	 * We also need this memory barrier to make certain all of the
4506	 * status bits have been updated before next_to_watch is written.
4507	 */
4508	wmb();
4509
4510	/* set next_to_watch value indicating a packet is present */
4511	first->next_to_watch = tx_desc;
4512
4513	i++;
4514	if (i == tx_ring->count)
4515		i = 0;
4516
4517	tx_ring->next_to_use = i;
4518
4519	writel(i, tx_ring->tail);
4520
4521	/* we need this if more than one processor can write to our tail
4522	 * at a time, it synchronizes IO on IA64/Altix systems */
4523	mmiowb();
4524
4525	return;
4526
4527dma_error:
4528	dev_err(tx_ring->dev, "TX DMA map failed\n");
4529
4530	/* clear dma mappings for failed tx_buffer_info map */
4531	for (;;) {
4532		tx_buffer = &tx_ring->tx_buffer_info[i];
4533		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
4534		if (tx_buffer == first)
4535			break;
4536		if (i == 0)
4537			i = tx_ring->count;
4538		i--;
4539	}
4540
4541	tx_ring->next_to_use = i;
4542}
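/* Sketch of the split above, assuming the igb.h value of 32KB for
 * IGB_MAX_DATA_PER_TXD: a 48KB linear region is emitted as two descriptors
 * (32KB + 16KB) that share a single DMA mapping, and each page fragment
 * then repeats the same carve-up as needed.
 */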
4543
4544static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4545{
4546	struct net_device *netdev = tx_ring->netdev;
4547
4548	netif_stop_subqueue(netdev, tx_ring->queue_index);
4549
4550	/* Herbert's original patch had:
4551	 *  smp_mb__after_netif_stop_queue();
4552	 * but since that doesn't exist yet, just open code it. */
4553	smp_mb();
4554
4555	/* We need to check again in case another CPU has just
4556	 * made room available. */
4557	if (igb_desc_unused(tx_ring) < size)
4558		return -EBUSY;
4559
4560	/* A reprieve! */
4561	netif_wake_subqueue(netdev, tx_ring->queue_index);
4562
4563	u64_stats_update_begin(&tx_ring->tx_syncp2);
4564	tx_ring->tx_stats.restart_queue2++;
4565	u64_stats_update_end(&tx_ring->tx_syncp2);
4566
4567	return 0;
4568}
4569
4570static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4571{
4572	if (igb_desc_unused(tx_ring) >= size)
4573		return 0;
4574	return __igb_maybe_stop_tx(tx_ring, size);
4575}
4576
4577netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4578				struct igb_ring *tx_ring)
4579{
4580	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4581	struct igb_tx_buffer *first;
4582	int tso;
4583	u32 tx_flags = 0;
4584	u16 count = TXD_USE_COUNT(skb_headlen(skb));
4585	__be16 protocol = vlan_get_protocol(skb);
4586	u8 hdr_len = 0;
4587
4588	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
4589	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
4590	 *       + 2 desc gap to keep tail from touching head,
4591	 *       + 1 desc for context descriptor,
4592	 * otherwise try next time
4593	 */
4594	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
4595		unsigned short f;
4596		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4597			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4598	} else {
4599		count += skb_shinfo(skb)->nr_frags;
4600	}
4601
4602	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
4603		/* this is a hard error */
4604		return NETDEV_TX_BUSY;
4605	}
4606
4607	/* record the location of the first descriptor for this packet */
4608	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4609	first->skb = skb;
4610	first->bytecount = skb->len;
4611	first->gso_segs = 1;
4612
4613	skb_tx_timestamp(skb);
4614
4615	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4616		     !(adapter->ptp_tx_skb))) {
4617		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4618		tx_flags |= IGB_TX_FLAGS_TSTAMP;
4619
4620		adapter->ptp_tx_skb = skb_get(skb);
4621		adapter->ptp_tx_start = jiffies;
4622		if (adapter->hw.mac.type == e1000_82576)
4623			schedule_work(&adapter->ptp_tx_work);
4624	}
4625
4626	if (vlan_tx_tag_present(skb)) {
4627		tx_flags |= IGB_TX_FLAGS_VLAN;
4628		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4629	}
4630
4631	/* record initial flags and protocol */
4632	first->tx_flags = tx_flags;
4633	first->protocol = protocol;
4634
4635	tso = igb_tso(tx_ring, first, &hdr_len);
4636	if (tso < 0)
4637		goto out_drop;
4638	else if (!tso)
4639		igb_tx_csum(tx_ring, first);
4640
4641	igb_tx_map(tx_ring, first, hdr_len);
4642
4643	/* Make sure there is space in the ring for the next send. */
4644	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
4645
4646	return NETDEV_TX_OK;
4647
4648out_drop:
4649	igb_unmap_and_free_tx_resource(tx_ring, first);
4650
4651	return NETDEV_TX_OK;
4652}
4653
4654static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4655						    struct sk_buff *skb)
4656{
4657	unsigned int r_idx = skb->queue_mapping;
4658
4659	if (r_idx >= adapter->num_tx_queues)
4660		r_idx = r_idx % adapter->num_tx_queues;
4661
4662	return adapter->tx_ring[r_idx];
4663}
4664
4665static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4666				  struct net_device *netdev)
4667{
4668	struct igb_adapter *adapter = netdev_priv(netdev);
4669
4670	if (test_bit(__IGB_DOWN, &adapter->state)) {
4671		dev_kfree_skb_any(skb);
4672		return NETDEV_TX_OK;
4673	}
4674
4675	if (skb->len <= 0) {
4676		dev_kfree_skb_any(skb);
4677		return NETDEV_TX_OK;
4678	}
4679
4680	/*
4681	 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4682	 * in order to meet this minimum size requirement.
4683	 */
4684	if (unlikely(skb->len < 17)) {
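		/* skb_pad() frees the skb on failure, so returning
		 * NETDEV_TX_OK here without a kfree avoids a double free.
		 */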
4685		if (skb_pad(skb, 17 - skb->len))
4686			return NETDEV_TX_OK;
4687		skb->len = 17;
4688		skb_set_tail_pointer(skb, 17);
4689	}
4690
4691	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
4692}
4693
4694/**
4695 * igb_tx_timeout - Respond to a Tx Hang
4696 * @netdev: network interface device structure
4697 **/
4698static void igb_tx_timeout(struct net_device *netdev)
4699{
4700	struct igb_adapter *adapter = netdev_priv(netdev);
4701	struct e1000_hw *hw = &adapter->hw;
4702
4703	/* Do the reset outside of interrupt context */
4704	adapter->tx_timeout_count++;
4705
4706	if (hw->mac.type >= e1000_82580)
4707		hw->dev_spec._82575.global_device_reset = true;
4708
4709	schedule_work(&adapter->reset_task);
4710	wr32(E1000_EICS,
4711	     (adapter->eims_enable_mask & ~adapter->eims_other));
4712}
4713
4714static void igb_reset_task(struct work_struct *work)
4715{
4716	struct igb_adapter *adapter;
4717	adapter = container_of(work, struct igb_adapter, reset_task);
4718
4719	igb_dump(adapter);
4720	netdev_err(adapter->netdev, "Reset adapter\n");
4721	igb_reinit_locked(adapter);
4722}
4723
4724/**
4725 * igb_get_stats64 - Get System Network Statistics
4726 * @netdev: network interface device structure
4727 * @stats: rtnl_link_stats64 pointer
4728 *
4729 **/
4730static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4731						 struct rtnl_link_stats64 *stats)
4732{
4733	struct igb_adapter *adapter = netdev_priv(netdev);
4734
4735	spin_lock(&adapter->stats64_lock);
4736	igb_update_stats(adapter, &adapter->stats64);
4737	memcpy(stats, &adapter->stats64, sizeof(*stats));
4738	spin_unlock(&adapter->stats64_lock);
4739
4740	return stats;
4741}
4742
4743/**
4744 * igb_change_mtu - Change the Maximum Transfer Unit
4745 * @netdev: network interface device structure
4746 * @new_mtu: new value for maximum frame size
4747 *
4748 * Returns 0 on success, negative on failure
4749 **/
4750static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4751{
4752	struct igb_adapter *adapter = netdev_priv(netdev);
4753	struct pci_dev *pdev = adapter->pdev;
4754	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
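	/* e.g. the standard 1500-byte MTU gives max_frame = 1500 + 14
	 * (Ethernet header) + 4 (FCS) + 4 (VLAN tag) = 1522, and a
	 * 9216-byte jumbo MTU maps to MAX_STD_JUMBO_FRAME_SIZE (9238).
	 */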
4755
4756	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
4757		dev_err(&pdev->dev, "Invalid MTU setting\n");
4758		return -EINVAL;
4759	}
4760
4761#define MAX_STD_JUMBO_FRAME_SIZE 9238
4762	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
4763		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
4764		return -EINVAL;
4765	}
4766
4767	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4768		msleep(1);
4769
4770	/* igb_down has a dependency on max_frame_size */
4771	adapter->max_frame_size = max_frame;
4772
4773	if (netif_running(netdev))
4774		igb_down(adapter);
4775
4776	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
4777		 netdev->mtu, new_mtu);
4778	netdev->mtu = new_mtu;
4779
4780	if (netif_running(netdev))
4781		igb_up(adapter);
4782	else
4783		igb_reset(adapter);
4784
4785	clear_bit(__IGB_RESETTING, &adapter->state);
4786
4787	return 0;
4788}
4789
4790/**
4791 * igb_update_stats - Update the board statistics counters
4792 * @adapter: board private structure
4793 **/
4794
4795void igb_update_stats(struct igb_adapter *adapter,
4796		      struct rtnl_link_stats64 *net_stats)
4797{
4798	struct e1000_hw *hw = &adapter->hw;
4799	struct pci_dev *pdev = adapter->pdev;
4800	u32 reg, mpc;
4801	u16 phy_tmp;
4802	int i;
4803	u64 bytes, packets;
4804	unsigned int start;
4805	u64 _bytes, _packets;
4806
4807#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4808
4809	/*
4810	 * Prevent stats update while adapter is being reset, or if the pci
4811	 * connection is down.
4812	 */
4813	if (adapter->link_speed == 0)
4814		return;
4815	if (pci_channel_offline(pdev))
4816		return;
4817
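	/* Per-ring byte/packet counters are sampled with the u64_stats
	 * fetch/retry pattern below: if the writer side updated a counter
	 * mid-copy, the loop re-reads, keeping 64-bit values consistent
	 * on 32-bit machines.
	 */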
4818	bytes = 0;
4819	packets = 0;
4820	for (i = 0; i < adapter->num_rx_queues; i++) {
4821		u32 rqdpc = rd32(E1000_RQDPC(i));
4822		struct igb_ring *ring = adapter->rx_ring[i];
4823
4824		if (rqdpc) {
4825			ring->rx_stats.drops += rqdpc;
4826			net_stats->rx_fifo_errors += rqdpc;
4827		}
4828
4829		do {
4830			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4831			_bytes = ring->rx_stats.bytes;
4832			_packets = ring->rx_stats.packets;
4833		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4834		bytes += _bytes;
4835		packets += _packets;
4836	}
4837
4838	net_stats->rx_bytes = bytes;
4839	net_stats->rx_packets = packets;
4840
4841	bytes = 0;
4842	packets = 0;
4843	for (i = 0; i < adapter->num_tx_queues; i++) {
4844		struct igb_ring *ring = adapter->tx_ring[i];
4845		do {
4846			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4847			_bytes = ring->tx_stats.bytes;
4848			_packets = ring->tx_stats.packets;
4849		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4850		bytes += _bytes;
4851		packets += _packets;
4852	}
4853	net_stats->tx_bytes = bytes;
4854	net_stats->tx_packets = packets;
4855
4856	/* read stats registers */
4857	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4858	adapter->stats.gprc += rd32(E1000_GPRC);
4859	adapter->stats.gorc += rd32(E1000_GORCL);
4860	rd32(E1000_GORCH); /* clear GORCL */
4861	adapter->stats.bprc += rd32(E1000_BPRC);
4862	adapter->stats.mprc += rd32(E1000_MPRC);
4863	adapter->stats.roc += rd32(E1000_ROC);
4864
4865	adapter->stats.prc64 += rd32(E1000_PRC64);
4866	adapter->stats.prc127 += rd32(E1000_PRC127);
4867	adapter->stats.prc255 += rd32(E1000_PRC255);
4868	adapter->stats.prc511 += rd32(E1000_PRC511);
4869	adapter->stats.prc1023 += rd32(E1000_PRC1023);
4870	adapter->stats.prc1522 += rd32(E1000_PRC1522);
4871	adapter->stats.symerrs += rd32(E1000_SYMERRS);
4872	adapter->stats.sec += rd32(E1000_SEC);
4873
4874	mpc = rd32(E1000_MPC);
4875	adapter->stats.mpc += mpc;
4876	net_stats->rx_fifo_errors += mpc;
4877	adapter->stats.scc += rd32(E1000_SCC);
4878	adapter->stats.ecol += rd32(E1000_ECOL);
4879	adapter->stats.mcc += rd32(E1000_MCC);
4880	adapter->stats.latecol += rd32(E1000_LATECOL);
4881	adapter->stats.dc += rd32(E1000_DC);
4882	adapter->stats.rlec += rd32(E1000_RLEC);
4883	adapter->stats.xonrxc += rd32(E1000_XONRXC);
4884	adapter->stats.xontxc += rd32(E1000_XONTXC);
4885	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4886	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4887	adapter->stats.fcruc += rd32(E1000_FCRUC);
4888	adapter->stats.gptc += rd32(E1000_GPTC);
4889	adapter->stats.gotc += rd32(E1000_GOTCL);
4890	rd32(E1000_GOTCH); /* clear GOTCL */
4891	adapter->stats.rnbc += rd32(E1000_RNBC);
4892	adapter->stats.ruc += rd32(E1000_RUC);
4893	adapter->stats.rfc += rd32(E1000_RFC);
4894	adapter->stats.rjc += rd32(E1000_RJC);
4895	adapter->stats.tor += rd32(E1000_TORH);
4896	adapter->stats.tot += rd32(E1000_TOTH);
4897	adapter->stats.tpr += rd32(E1000_TPR);
4898
4899	adapter->stats.ptc64 += rd32(E1000_PTC64);
4900	adapter->stats.ptc127 += rd32(E1000_PTC127);
4901	adapter->stats.ptc255 += rd32(E1000_PTC255);
4902	adapter->stats.ptc511 += rd32(E1000_PTC511);
4903	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4904	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4905
4906	adapter->stats.mptc += rd32(E1000_MPTC);
4907	adapter->stats.bptc += rd32(E1000_BPTC);
4908
4909	adapter->stats.tpt += rd32(E1000_TPT);
4910	adapter->stats.colc += rd32(E1000_COLC);
4911
4912	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
4913	/* read internal phy specific stats */
4914	reg = rd32(E1000_CTRL_EXT);
4915	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4916		adapter->stats.rxerrc += rd32(E1000_RXERRC);
4917
4918		/* this stat has invalid values on i210/i211 */
4919		if ((hw->mac.type != e1000_i210) &&
4920		    (hw->mac.type != e1000_i211))
4921			adapter->stats.tncrs += rd32(E1000_TNCRS);
4922	}
4923
4924	adapter->stats.tsctc += rd32(E1000_TSCTC);
4925	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4926
4927	adapter->stats.iac += rd32(E1000_IAC);
4928	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4929	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4930	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4931	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4932	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4933	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4934	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4935	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4936
4937	/* Fill out the OS statistics structure */
4938	net_stats->multicast = adapter->stats.mprc;
4939	net_stats->collisions = adapter->stats.colc;
4940
4941	/* Rx Errors */
4942
4943	/* RLEC on some newer hardware can be incorrect so build
4944	 * our own version based on RUC and ROC */
4945	net_stats->rx_errors = adapter->stats.rxerrc +
4946		adapter->stats.crcerrs + adapter->stats.algnerrc +
4947		adapter->stats.ruc + adapter->stats.roc +
4948		adapter->stats.cexterr;
4949	net_stats->rx_length_errors = adapter->stats.ruc +
4950				      adapter->stats.roc;
4951	net_stats->rx_crc_errors = adapter->stats.crcerrs;
4952	net_stats->rx_frame_errors = adapter->stats.algnerrc;
4953	net_stats->rx_missed_errors = adapter->stats.mpc;
4954
4955	/* Tx Errors */
4956	net_stats->tx_errors = adapter->stats.ecol +
4957			       adapter->stats.latecol;
4958	net_stats->tx_aborted_errors = adapter->stats.ecol;
4959	net_stats->tx_window_errors = adapter->stats.latecol;
4960	net_stats->tx_carrier_errors = adapter->stats.tncrs;
4961
4962	/* Tx Dropped needs to be maintained elsewhere */
4963
4964	/* Phy Stats */
4965	if (hw->phy.media_type == e1000_media_type_copper) {
4966		if ((adapter->link_speed == SPEED_1000) &&
4967		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
4968			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4969			adapter->phy_stats.idle_errors += phy_tmp;
4970		}
4971	}
4972
4973	/* Management Stats */
4974	adapter->stats.mgptc += rd32(E1000_MGTPTC);
4975	adapter->stats.mgprc += rd32(E1000_MGTPRC);
4976	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4977
4978	/* OS2BMC Stats */
4979	reg = rd32(E1000_MANC);
4980	if (reg & E1000_MANC_EN_BMC2OS) {
4981		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4982		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4983		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4984		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4985	}
4986}
4987
4988static irqreturn_t igb_msix_other(int irq, void *data)
4989{
4990	struct igb_adapter *adapter = data;
4991	struct e1000_hw *hw = &adapter->hw;
4992	u32 icr = rd32(E1000_ICR);
4993	/* reading ICR causes bit 31 of EICR to be cleared */
4994
4995	if (icr & E1000_ICR_DRSTA)
4996		schedule_work(&adapter->reset_task);
4997
4998	if (icr & E1000_ICR_DOUTSYNC) {
4999		/* HW is reporting DMA is out of sync */
5000		adapter->stats.doosync++;
5001		/* The DMA Out of Sync is also indication of a spoof event
5002		 * in IOV mode. Check the Wrong VM Behavior register to
5003		 * see if it is really a spoof event. */
5004		igb_check_wvbr(adapter);
5005	}
5006
5007	/* Check for a mailbox event */
5008	if (icr & E1000_ICR_VMMB)
5009		igb_msg_task(adapter);
5010
5011	if (icr & E1000_ICR_LSC) {
5012		hw->mac.get_link_status = 1;
5013		/* guard against interrupt when we're going down */
5014		if (!test_bit(__IGB_DOWN, &adapter->state))
5015			mod_timer(&adapter->watchdog_timer, jiffies + 1);
5016	}
5017
5018	if (icr & E1000_ICR_TS) {
5019		u32 tsicr = rd32(E1000_TSICR);
5020
5021		if (tsicr & E1000_TSICR_TXTS) {
5022			/* acknowledge the interrupt */
5023			wr32(E1000_TSICR, E1000_TSICR_TXTS);
5024			/* retrieve hardware timestamp */
5025			schedule_work(&adapter->ptp_tx_work);
5026		}
5027	}
5028
5029	wr32(E1000_EIMS, adapter->eims_other);
5030
5031	return IRQ_HANDLED;
5032}
5033
5034static void igb_write_itr(struct igb_q_vector *q_vector)
5035{
5036	struct igb_adapter *adapter = q_vector->adapter;
5037	u32 itr_val = q_vector->itr_val & 0x7FFC;
5038
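	/* Keep only the EITR interval field (the two low bits are not part
	 * of the interval, hence the 0x7FFC mask); a zero interval is
	 * presumably bumped to the smallest non-zero value below rather
	 * than disabling moderation entirely.
	 */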
5039	if (!q_vector->set_itr)
5040		return;
5041
5042	if (!itr_val)
5043		itr_val = 0x4;
5044
5045	if (adapter->hw.mac.type == e1000_82575)
5046		itr_val |= itr_val << 16;
5047	else
5048		itr_val |= E1000_EITR_CNT_IGNR;
5049
5050	writel(itr_val, q_vector->itr_register);
5051	q_vector->set_itr = 0;
5052}
5053
5054static irqreturn_t igb_msix_ring(int irq, void *data)
5055{
5056	struct igb_q_vector *q_vector = data;
5057
5058	/* Write the ITR value calculated from the previous interrupt. */
5059	igb_write_itr(q_vector);
5060
5061	napi_schedule(&q_vector->napi);
5062
5063	return IRQ_HANDLED;
5064}
5065
5066#ifdef CONFIG_IGB_DCA
5067static void igb_update_tx_dca(struct igb_adapter *adapter,
5068			      struct igb_ring *tx_ring,
5069			      int cpu)
5070{
5071	struct e1000_hw *hw = &adapter->hw;
5072	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
5073
5074	if (hw->mac.type != e1000_82575)
5075		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5076
5077	/*
5078	 * We can enable relaxed ordering for reads, but not writes when
5079	 * DCA is enabled.  This is due to a known issue in some chipsets
5080	 * which will cause the DCA tag to be cleared.
5081	 */
5082	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
5083		  E1000_DCA_TXCTRL_DATA_RRO_EN |
5084		  E1000_DCA_TXCTRL_DESC_DCA_EN;
5085
5086	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
5087}
5088
5089static void igb_update_rx_dca(struct igb_adapter *adapter,
5090			      struct igb_ring *rx_ring,
5091			      int cpu)
5092{
5093	struct e1000_hw *hw = &adapter->hw;
5094	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
5095
5096	if (hw->mac.type != e1000_82575)
5097		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5098
5099	/*
5100	 * We can enable relaxed ordering for reads, but not writes when
5101	 * DCA is enabled.  This is due to a known issue in some chipsets
5102	 * which will cause the DCA tag to be cleared.
5103	 */
5104	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
5105		  E1000_DCA_RXCTRL_DESC_DCA_EN;
5106
5107	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
5108}
5109
5110static void igb_update_dca(struct igb_q_vector *q_vector)
5111{
5112	struct igb_adapter *adapter = q_vector->adapter;
5113	int cpu = get_cpu();
5114
5115	if (q_vector->cpu == cpu)
5116		goto out_no_update;
5117
5118	if (q_vector->tx.ring)
5119		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
5120
5121	if (q_vector->rx.ring)
5122		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
5123
5124	q_vector->cpu = cpu;
5125out_no_update:
5126	put_cpu();
5127}
5128
5129static void igb_setup_dca(struct igb_adapter *adapter)
5130{
5131	struct e1000_hw *hw = &adapter->hw;
5132	int i;
5133
5134	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
5135		return;
5136
5137	/* Always use CB2 mode, difference is masked in the CB driver. */
5138	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
5139
5140	for (i = 0; i < adapter->num_q_vectors; i++) {
5141		adapter->q_vector[i]->cpu = -1;
5142		igb_update_dca(adapter->q_vector[i]);
5143	}
5144}
5145
5146static int __igb_notify_dca(struct device *dev, void *data)
5147{
5148	struct net_device *netdev = dev_get_drvdata(dev);
5149	struct igb_adapter *adapter = netdev_priv(netdev);
5150	struct pci_dev *pdev = adapter->pdev;
5151	struct e1000_hw *hw = &adapter->hw;
5152	unsigned long event = *(unsigned long *)data;
5153
5154	switch (event) {
5155	case DCA_PROVIDER_ADD:
5156		/* if already enabled, don't do it again */
5157		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
5158			break;
5159		if (dca_add_requester(dev) == 0) {
5160			adapter->flags |= IGB_FLAG_DCA_ENABLED;
5161			dev_info(&pdev->dev, "DCA enabled\n");
5162			igb_setup_dca(adapter);
5163			break;
5164		}
5165		/* Fall Through since DCA is disabled. */
5166	case DCA_PROVIDER_REMOVE:
5167		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
5168			/* without this a class_device is left
5169			 * hanging around in the sysfs model */
5170			dca_remove_requester(dev);
5171			dev_info(&pdev->dev, "DCA disabled\n");
5172			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
5173			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
5174		}
5175		break;
5176	}
5177
5178	return 0;
5179}
5180
5181static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5182                          void *p)
5183{
5184	int ret_val;
5185
5186	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5187	                                 __igb_notify_dca);
5188
5189	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5190}
5191#endif /* CONFIG_IGB_DCA */
5192
5193#ifdef CONFIG_PCI_IOV
5194static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5195{
5196	unsigned char mac_addr[ETH_ALEN];
5197
5198	eth_zero_addr(mac_addr);
5199	igb_set_vf_mac(adapter, vf, mac_addr);
5200
5201	return 0;
5202}
5203
5204static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
5205{
5206	struct pci_dev *pdev = adapter->pdev;
5207	struct pci_dev *vfdev;
5208	int dev_id;
5209
5210	switch (adapter->hw.mac.type) {
5211	case e1000_82576:
5212		dev_id = IGB_82576_VF_DEV_ID;
5213		break;
5214	case e1000_i350:
5215		dev_id = IGB_I350_VF_DEV_ID;
5216		break;
5217	default:
5218		return false;
5219	}
5220
5221	/* loop through all the VFs to see if we own any that are assigned */
5222	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
5223	while (vfdev) {
5224		/* if we don't own it we don't care */
5225		if (vfdev->is_virtfn && vfdev->physfn == pdev) {
5226			/* if it is assigned we cannot release it */
5227			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
5228				return true;
5229		}
5230
5231		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
5232	}
5233
5234	return false;
5235}
5236
5237#endif
5238static void igb_ping_all_vfs(struct igb_adapter *adapter)
5239{
5240	struct e1000_hw *hw = &adapter->hw;
5241	u32 ping;
5242	int i;
5243
5244	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5245		ping = E1000_PF_CONTROL_MSG;
5246		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
5247			ping |= E1000_VT_MSGTYPE_CTS;
5248		igb_write_mbx(hw, &ping, 1, i);
5249	}
5250}
5251
5252static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5253{
5254	struct e1000_hw *hw = &adapter->hw;
5255	u32 vmolr = rd32(E1000_VMOLR(vf));
5256	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5257
5258	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
5259	                    IGB_VF_FLAG_MULTI_PROMISC);
5260	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5261
5262	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5263		vmolr |= E1000_VMOLR_MPME;
5264		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
5265		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5266	} else {
5267		/*
5268		 * if we have hashes and we are clearing a multicast promisc
5269		 * flag we need to write the hashes to the MTA as this step
5270		 * was previously skipped
5271		 */
5272		if (vf_data->num_vf_mc_hashes > 30) {
5273			vmolr |= E1000_VMOLR_MPME;
5274		} else if (vf_data->num_vf_mc_hashes) {
5275			int j;
5276			vmolr |= E1000_VMOLR_ROMPE;
5277			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5278				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5279		}
5280	}
5281
5282	wr32(E1000_VMOLR(vf), vmolr);
5283
5284	/* there are flags left unprocessed, likely not supported */
5285	if (*msgbuf & E1000_VT_MSGINFO_MASK)
5286		return -EINVAL;
5287
5288	return 0;
5290}
5291
5292static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5293				  u32 *msgbuf, u32 vf)
5294{
5295	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5296	u16 *hash_list = (u16 *)&msgbuf[1];
5297	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5298	int i;
5299
5300	/* salt away the number of multicast addresses assigned
5301	 * to this VF for later use to restore when the PF multicast
5302	 * list changes
5303	 */
5304	vf_data->num_vf_mc_hashes = n;
5305
5306	/* only up to 30 hash values supported: the 16-dword mailbox
	 * message holds a one-dword header plus 15 payload dwords,
	 * i.e. room for 30 u16 hashes
	 */
5307	if (n > 30)
5308		n = 30;
5309
5310	/* store the hashes for later use */
5311	for (i = 0; i < n; i++)
5312		vf_data->vf_mc_hashes[i] = hash_list[i];
5313
5314	/* Flush and reset the mta with the new values */
5315	igb_set_rx_mode(adapter->netdev);
5316
5317	return 0;
5318}
5319
5320static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5321{
5322	struct e1000_hw *hw = &adapter->hw;
5323	struct vf_data_storage *vf_data;
5324	int i, j;
5325
5326	for (i = 0; i < adapter->vfs_allocated_count; i++) {
5327		u32 vmolr = rd32(E1000_VMOLR(i));
5328		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5329
5330		vf_data = &adapter->vf_data[i];
5331
5332		if ((vf_data->num_vf_mc_hashes > 30) ||
5333		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5334			vmolr |= E1000_VMOLR_MPME;
5335		} else if (vf_data->num_vf_mc_hashes) {
5336			vmolr |= E1000_VMOLR_ROMPE;
5337			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5338				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5339		}
5340		wr32(E1000_VMOLR(i), vmolr);
5341	}
5342}
5343
5344static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5345{
5346	struct e1000_hw *hw = &adapter->hw;
5347	u32 pool_mask, reg, vid;
5348	int i;
5349
5350	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5351
5352	/* Find the vlan filter for this id */
5353	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5354		reg = rd32(E1000_VLVF(i));
5355
5356		/* remove the vf from the pool */
5357		reg &= ~pool_mask;
5358
5359		/* if pool is empty then remove entry from vfta */
5360		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5361		    (reg & E1000_VLVF_VLANID_ENABLE)) {
5362			vid = reg & E1000_VLVF_VLANID_MASK;
5363			igb_vfta_set(hw, vid, false);
5364			reg = 0;
5365		}
5366
5367		wr32(E1000_VLVF(i), reg);
5368	}
5369
5370	adapter->vf_data[vf].vlans_enabled = 0;
5371}
5372
5373static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5374{
5375	struct e1000_hw *hw = &adapter->hw;
5376	u32 reg, i;
5377
5378	/* The vlvf table only exists on 82576 hardware and newer */
5379	if (hw->mac.type < e1000_82576)
5380		return -1;
5381
5382	/* we only need to do this if VMDq is enabled */
5383	if (!adapter->vfs_allocated_count)
5384		return -1;
5385
5386	/* Find the vlan filter for this id */
5387	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5388		reg = rd32(E1000_VLVF(i));
5389		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5390		    vid == (reg & E1000_VLVF_VLANID_MASK))
5391			break;
5392	}
5393
5394	if (add) {
5395		if (i == E1000_VLVF_ARRAY_SIZE) {
5396			/* Did not find a matching VLAN ID entry that was
5397			 * enabled.  Search for a free filter entry, i.e.
5398			 * one without the enable bit set
5399			 */
5400			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5401				reg = rd32(E1000_VLVF(i));
5402				if (!(reg & E1000_VLVF_VLANID_ENABLE))
5403					break;
5404			}
5405		}
5406		if (i < E1000_VLVF_ARRAY_SIZE) {
5407			/* Found an enabled/available entry */
5408			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5409
5410			/* if !enabled we need to set this up in vfta */
5411			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
5412				/* add VID to filter table */
5413				igb_vfta_set(hw, vid, true);
5414				reg |= E1000_VLVF_VLANID_ENABLE;
5415			}
5416			reg &= ~E1000_VLVF_VLANID_MASK;
5417			reg |= vid;
5418			wr32(E1000_VLVF(i), reg);
5419
5420			/* do not modify RLPML for PF devices */
5421			if (vf >= adapter->vfs_allocated_count)
5422				return 0;
5423
5424			if (!adapter->vf_data[vf].vlans_enabled) {
5425				u32 size;
5426				reg = rd32(E1000_VMOLR(vf));
5427				size = reg & E1000_VMOLR_RLPML_MASK;
5428				size += 4;
5429				reg &= ~E1000_VMOLR_RLPML_MASK;
5430				reg |= size;
5431				wr32(E1000_VMOLR(vf), reg);
5432			}
5433
5434			adapter->vf_data[vf].vlans_enabled++;
5435		}
5436	} else {
5437		if (i < E1000_VLVF_ARRAY_SIZE) {
5438			/* remove vf from the pool */
5439			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5440			/* if pool is empty then remove entry from vfta */
5441			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5442				reg = 0;
5443				igb_vfta_set(hw, vid, false);
5444			}
5445			wr32(E1000_VLVF(i), reg);
5446
5447			/* do not modify RLPML for PF devices */
5448			if (vf >= adapter->vfs_allocated_count)
5449				return 0;
5450
5451			adapter->vf_data[vf].vlans_enabled--;
5452			if (!adapter->vf_data[vf].vlans_enabled) {
5453				u32 size;
5454				reg = rd32(E1000_VMOLR(vf));
5455				size = reg & E1000_VMOLR_RLPML_MASK;
5456				size -= 4;
5457				reg &= ~E1000_VMOLR_RLPML_MASK;
5458				reg |= size;
5459				wr32(E1000_VMOLR(vf), reg);
5460			}
5461		}
5462	}
5463	return 0;
5464}
5465
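/**
 * igb_set_vmvir - program the default port VLAN for a VF
 * @adapter: board private structure
 * @vid: VLAN id (including QoS bits) to insert, or 0 to disable insertion
 * @vf: VF whose VMVIR register is written
 **/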
5466static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5467{
5468	struct e1000_hw *hw = &adapter->hw;
5469
5470	if (vid)
5471		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5472	else
5473		wr32(E1000_VMVIR(vf), 0);
5474}
5475
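/**
 * igb_ndo_set_vf_vlan - ndo handler for "ip link set ... vf N vlan V qos Q"
 * @netdev: network interface device structure
 * @vf: VF to configure
 * @vlan: VLAN id to assign, or 0 to remove the port VLAN
 * @qos: 802.1p priority (0-7) inserted along with the VLAN tag
 **/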
5476static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5477			       int vf, u16 vlan, u8 qos)
5478{
5479	int err = 0;
5480	struct igb_adapter *adapter = netdev_priv(netdev);
5481
5482	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5483		return -EINVAL;
5484	if (vlan || qos) {
5485		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5486		if (err)
5487			goto out;
5488		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5489		igb_set_vmolr(adapter, vf, !vlan);
5490		adapter->vf_data[vf].pf_vlan = vlan;
5491		adapter->vf_data[vf].pf_qos = qos;
5492		dev_info(&adapter->pdev->dev,
5493			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5494		if (test_bit(__IGB_DOWN, &adapter->state)) {
5495			dev_warn(&adapter->pdev->dev,
5496				 "The VF VLAN has been set, but the PF device is not up.\n");
5497			dev_warn(&adapter->pdev->dev,
5498				 "Bring the PF device up before attempting to use the VF device.\n");
5501		}
5502	} else {
5503		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5504			     false, vf);
5505		igb_set_vmvir(adapter, vlan, vf);
5506		igb_set_vmolr(adapter, vf, true);
5507		adapter->vf_data[vf].pf_vlan = 0;
5508		adapter->vf_data[vf].pf_qos = 0;
5509	}
5510out:
5511	return err;
5512}
5513
5514static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5515{
5516	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5517	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5518
5519	return igb_vlvf_set(adapter, vid, add, vf);
5520}
5521
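/* Restore a VF to its post-reset state: clear all flags except the
 * PF-set-MAC indication and bring offloads, VLAN filters and the
 * multicast table back to their defaults.
 */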
5522static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
5523{
5524	/* clear flags - except flag that indicates PF has set the MAC */
5525	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
5526	adapter->vf_data[vf].last_nack = jiffies;
5527
5528	/* reset offloads to defaults */
5529	igb_set_vmolr(adapter, vf, true);
5530
5531	/* reset vlans for device */
5532	igb_clear_vf_vfta(adapter, vf);
5533	if (adapter->vf_data[vf].pf_vlan)
5534		igb_ndo_set_vf_vlan(adapter->netdev, vf,
5535				    adapter->vf_data[vf].pf_vlan,
5536				    adapter->vf_data[vf].pf_qos);
5537	else
5538		igb_clear_vf_vfta(adapter, vf);
5539
5540	/* reset multicast table array for vf */
5541	adapter->vf_data[vf].num_vf_mc_hashes = 0;
5542
5543	/* Flush and reset the mta with the new values */
5544	igb_set_rx_mode(adapter->netdev);
5545}
5546
5547static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5548{
5549	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5550
5551	/* clear mac address as we were hotplug removed/added */
5552	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5553		eth_zero_addr(vf_mac);
5554
5555	/* process remaining reset events */
5556	igb_vf_reset(adapter, vf);
5557}
5558
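/* Handle an E1000_VF_RESET mailbox request: run the common reset path,
 * program the VF MAC address into the RAR table, re-enable Tx/Rx for the
 * pool, and ACK the reset with the MAC address so the VF can finish its
 * own initialization.
 */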
5559static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5560{
5561	struct e1000_hw *hw = &adapter->hw;
5562	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5563	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
5564	u32 reg, msgbuf[3];
5565	u8 *addr = (u8 *)(&msgbuf[1]);
5566
5567	/* process all the same items cleared in a function level reset */
5568	igb_vf_reset(adapter, vf);
5569
5570	/* set vf mac address */
5571	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
5572
5573	/* enable transmit and receive for vf */
5574	reg = rd32(E1000_VFTE);
5575	wr32(E1000_VFTE, reg | (1 << vf));
5576	reg = rd32(E1000_VFRE);
5577	wr32(E1000_VFRE, reg | (1 << vf));
5578
5579	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
5580
5581	/* reply to reset with ack and vf mac address */
5582	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5583	memcpy(addr, vf_mac, ETH_ALEN);
5584	igb_write_mbx(hw, msgbuf, 3, vf);
5585}
5586
5587static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5588{
5589	/*
5590	 * The VF MAC Address is stored in a packed array of bytes
5591	 * starting at the second 32 bit word of the msg array
5592	 */
5593	unsigned char *addr = (unsigned char *)&msg[1];
5594	int err = -1;
5595
5596	if (is_valid_ether_addr(addr))
5597		err = igb_set_vf_mac(adapter, vf, addr);
5598
5599	return err;
5600}
5601
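/* If a VF reads the mailbox before completing the reset handshake, NACK
 * it (at most once every 2 seconds) so it knows it must reset first.
 */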
5602static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5603{
5604	struct e1000_hw *hw = &adapter->hw;
5605	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5606	u32 msg = E1000_VT_MSGTYPE_NACK;
5607
5608	/* if device isn't clear to send it shouldn't be reading either */
5609	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5610	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
5611		igb_write_mbx(hw, &msg, 1, vf);
5612		vf_data->last_nack = jiffies;
5613	}
5614}
5615
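/* Read one mailbox message from @vf and dispatch it.  Until the VF has
 * completed a reset (IGB_VF_FLAG_CTS set) every request other than
 * E1000_VF_RESET is rejected with a NACK.
 */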
5616static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5617{
5618	struct pci_dev *pdev = adapter->pdev;
5619	u32 msgbuf[E1000_VFMAILBOX_SIZE];
5620	struct e1000_hw *hw = &adapter->hw;
5621	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5622	s32 retval;
5623
5624	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
5625
5626	if (retval) {
5627		/* if receive failed revoke VF CTS stats and restart init */
5628		dev_err(&pdev->dev, "Error receiving message from VF\n");
5629		vf_data->flags &= ~IGB_VF_FLAG_CTS;
5630		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5631			return;
5632		goto out;
5633	}
5634
5635	/* this is a message we already processed, do nothing */
5636	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
5637		return;
5638
5639	/*
5640	 * until the vf completes a reset it should not be
5641	 * allowed to start any configuration.
5642	 */
5643
5644	if (msgbuf[0] == E1000_VF_RESET) {
5645		igb_vf_reset_msg(adapter, vf);
5646		return;
5647	}
5648
5649	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
5650		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5651			return;
5652		retval = -1;
5653		goto out;
5654	}
5655
5656	switch ((msgbuf[0] & 0xFFFF)) {
5657	case E1000_VF_SET_MAC_ADDR:
5658		retval = -EINVAL;
5659		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5660			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5661		else
5662			dev_warn(&pdev->dev,
5663				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
5664				 vf);
5666		break;
5667	case E1000_VF_SET_PROMISC:
5668		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5669		break;
5670	case E1000_VF_SET_MULTICAST:
5671		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5672		break;
5673	case E1000_VF_SET_LPE:
5674		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5675		break;
5676	case E1000_VF_SET_VLAN:
5677		retval = -1;
5678		if (vf_data->pf_vlan)
5679			dev_warn(&pdev->dev,
5680				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
5681				 vf);
5683		else
5684			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
5685		break;
5686	default:
5687		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
5688		retval = -1;
5689		break;
5690	}
5691
5692	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5693out:
5694	/* notify the VF of the results of what it sent us */
5695	if (retval)
5696		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5697	else
5698		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5699
5700	igb_write_mbx(hw, msgbuf, 1, vf);
5701}
5702
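/**
 * igb_msg_task - service the mailbox for all VFs
 * @adapter: board private structure
 *
 * Polls every allocated VF for pending reset requests, messages and acks.
 **/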
5703static void igb_msg_task(struct igb_adapter *adapter)
5704{
5705	struct e1000_hw *hw = &adapter->hw;
5706	u32 vf;
5707
5708	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5709		/* process any reset requests */
5710		if (!igb_check_for_rst(hw, vf))
5711			igb_vf_reset_event(adapter, vf);
5712
5713		/* process any messages pending */
5714		if (!igb_check_for_msg(hw, vf))
5715			igb_rcv_msg_from_vf(adapter, vf);
5716
5717		/* process any acks */
5718		if (!igb_check_for_ack(hw, vf))
5719			igb_rcv_ack_from_vf(adapter, vf);
5720	}
5721}
5722
5723/**
5724 *  igb_set_uta - Set unicast filter table address
5725 *  @adapter: board private structure
5726 *
5727 *  The unicast table address is a register array of 32-bit registers.
5728 *  The table is meant to be used in a way similar to how the MTA is used;
5729 *  however, due to certain limitations in the hardware it is necessary to
5730 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5731 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled.
5732 **/
5733static void igb_set_uta(struct igb_adapter *adapter)
5734{
5735	struct e1000_hw *hw = &adapter->hw;
5736	int i;
5737
5738	/* The UTA table only exists on 82576 hardware and newer */
5739	if (hw->mac.type < e1000_82576)
5740		return;
5741
5742	/* we only need to do this if VMDq is enabled */
5743	if (!adapter->vfs_allocated_count)
5744		return;
5745
5746	for (i = 0; i < hw->mac.uta_reg_count; i++)
5747		array_wr32(E1000_UTA, i, ~0);
5748}
5749
5750/**
5751 * igb_intr_msi - Interrupt Handler
5752 * @irq: interrupt number
5753 * @data: pointer to a network interface device structure
5754 **/
5755static irqreturn_t igb_intr_msi(int irq, void *data)
5756{
5757	struct igb_adapter *adapter = data;
5758	struct igb_q_vector *q_vector = adapter->q_vector[0];
5759	struct e1000_hw *hw = &adapter->hw;
5760	/* read ICR disables interrupts using IAM */
5761	u32 icr = rd32(E1000_ICR);
5762
5763	igb_write_itr(q_vector);
5764
5765	if (icr & E1000_ICR_DRSTA)
5766		schedule_work(&adapter->reset_task);
5767
5768	if (icr & E1000_ICR_DOUTSYNC) {
5769		/* HW is reporting DMA is out of sync */
5770		adapter->stats.doosync++;
5771	}
5772
5773	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5774		hw->mac.get_link_status = 1;
5775		if (!test_bit(__IGB_DOWN, &adapter->state))
5776			mod_timer(&adapter->watchdog_timer, jiffies + 1);
5777	}
5778
5779	if (icr & E1000_ICR_TS) {
5780		u32 tsicr = rd32(E1000_TSICR);
5781
5782		if (tsicr & E1000_TSICR_TXTS) {
5783			/* acknowledge the interrupt */
5784			wr32(E1000_TSICR, E1000_TSICR_TXTS);
5785			/* retrieve hardware timestamp */
5786			schedule_work(&adapter->ptp_tx_work);
5787		}
5788	}
5789
5790	napi_schedule(&q_vector->napi);
5791
5792	return IRQ_HANDLED;
5793}
5794
5795/**
5796 * igb_intr - Legacy Interrupt Handler
5797 * @irq: interrupt number
5798 * @data: pointer to a network interface device structure
5799 **/
5800static irqreturn_t igb_intr(int irq, void *data)
5801{
5802	struct igb_adapter *adapter = data;
5803	struct igb_q_vector *q_vector = adapter->q_vector[0];
5804	struct e1000_hw *hw = &adapter->hw;
5805	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
5806	 * need for the IMC write */
5807	u32 icr = rd32(E1000_ICR);
5808
5809	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5810	 * not set, then the adapter didn't send an interrupt */
5811	if (!(icr & E1000_ICR_INT_ASSERTED))
5812		return IRQ_NONE;
5813
5814	igb_write_itr(q_vector);
5815
5816	if (icr & E1000_ICR_DRSTA)
5817		schedule_work(&adapter->reset_task);
5818
5819	if (icr & E1000_ICR_DOUTSYNC) {
5820		/* HW is reporting DMA is out of sync */
5821		adapter->stats.doosync++;
5822	}
5823
5824	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5825		hw->mac.get_link_status = 1;
5826		/* guard against interrupt when we're going down */
5827		if (!test_bit(__IGB_DOWN, &adapter->state))
5828			mod_timer(&adapter->watchdog_timer, jiffies + 1);
5829	}
5830
5831	if (icr & E1000_ICR_TS) {
5832		u32 tsicr = rd32(E1000_TSICR);
5833
5834		if (tsicr & E1000_TSICR_TXTS) {
5835			/* acknowledge the interrupt */
5836			wr32(E1000_TSICR, E1000_TSICR_TXTS);
5837			/* retrieve hardware timestamp */
5838			schedule_work(&adapter->ptp_tx_work);
5839		}
5840	}
5841
5842	napi_schedule(&q_vector->napi);
5843
5844	return IRQ_HANDLED;
5845}
5846
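/* Update ITR for the vector that just finished polling and re-arm its
 * interrupt: EIMS for MSI-X, the shared interrupt mask otherwise.
 */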
5847static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
5848{
5849	struct igb_adapter *adapter = q_vector->adapter;
5850	struct e1000_hw *hw = &adapter->hw;
5851
5852	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5853	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5854		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5855			igb_set_itr(q_vector);
5856		else
5857			igb_update_ring_itr(q_vector);
5858	}
5859
5860	if (!test_bit(__IGB_DOWN, &adapter->state)) {
5861		if (adapter->msix_entries)
5862			wr32(E1000_EIMS, q_vector->eims_value);
5863		else
5864			igb_irq_enable(adapter);
5865	}
5866}
5867
5868/**
5869 * igb_poll - NAPI Rx polling callback
5870 * @napi: napi polling structure
5871 * @budget: count of how many packets we should handle
5872 **/
5873static int igb_poll(struct napi_struct *napi, int budget)
5874{
5875	struct igb_q_vector *q_vector = container_of(napi,
5876	                                             struct igb_q_vector,
5877	                                             napi);
5878	bool clean_complete = true;
5879
5880#ifdef CONFIG_IGB_DCA
5881	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5882		igb_update_dca(q_vector);
5883#endif
5884	if (q_vector->tx.ring)
5885		clean_complete = igb_clean_tx_irq(q_vector);
5886
5887	if (q_vector->rx.ring)
5888		clean_complete &= igb_clean_rx_irq(q_vector, budget);
5889
5890	/* If all work not completed, return budget and keep polling */
5891	if (!clean_complete)
5892		return budget;
5893
5894	/* If not enough Rx work done, exit the polling mode */
5895	napi_complete(napi);
5896	igb_ring_irq_enable(q_vector);
5897
5898	return 0;
5899}
5900
5901/**
5902 * igb_clean_tx_irq - Reclaim resources after transmit completes
5903 * @q_vector: pointer to q_vector containing needed info
5904 *
5905 * returns true if ring is completely cleaned
5906 **/
5907static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5908{
5909	struct igb_adapter *adapter = q_vector->adapter;
5910	struct igb_ring *tx_ring = q_vector->tx.ring;
5911	struct igb_tx_buffer *tx_buffer;
5912	union e1000_adv_tx_desc *tx_desc;
5913	unsigned int total_bytes = 0, total_packets = 0;
5914	unsigned int budget = q_vector->tx.work_limit;
5915	unsigned int i = tx_ring->next_to_clean;
5916
5917	if (test_bit(__IGB_DOWN, &adapter->state))
5918		return true;
5919
5920	tx_buffer = &tx_ring->tx_buffer_info[i];
5921	tx_desc = IGB_TX_DESC(tx_ring, i);
5922	i -= tx_ring->count;
5923
5924	do {
5925		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
5926
5927		/* if next_to_watch is not set then there is no work pending */
5928		if (!eop_desc)
5929			break;
5930
5931		/* prevent any other reads prior to eop_desc */
5932		read_barrier_depends();
5933
5934		/* if DD is not set pending work has not been completed */
5935		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5936			break;
5937
5938		/* clear next_to_watch to prevent false hangs */
5939		tx_buffer->next_to_watch = NULL;
5940
5941		/* update the statistics for this packet */
5942		total_bytes += tx_buffer->bytecount;
5943		total_packets += tx_buffer->gso_segs;
5944
5945		/* free the skb */
5946		dev_kfree_skb_any(tx_buffer->skb);
5947
5948		/* unmap skb header data */
5949		dma_unmap_single(tx_ring->dev,
5950				 dma_unmap_addr(tx_buffer, dma),
5951				 dma_unmap_len(tx_buffer, len),
5952				 DMA_TO_DEVICE);
5953
5954		/* clear tx_buffer data */
5955		tx_buffer->skb = NULL;
5956		dma_unmap_len_set(tx_buffer, len, 0);
5957
5958		/* clear last DMA location and unmap remaining buffers */
5959		while (tx_desc != eop_desc) {
5960			tx_buffer++;
5961			tx_desc++;
5962			i++;
5963			if (unlikely(!i)) {
5964				i -= tx_ring->count;
5965				tx_buffer = tx_ring->tx_buffer_info;
5966				tx_desc = IGB_TX_DESC(tx_ring, 0);
5967			}
5968
5969			/* unmap any remaining paged data */
5970			if (dma_unmap_len(tx_buffer, len)) {
5971				dma_unmap_page(tx_ring->dev,
5972					       dma_unmap_addr(tx_buffer, dma),
5973					       dma_unmap_len(tx_buffer, len),
5974					       DMA_TO_DEVICE);
5975				dma_unmap_len_set(tx_buffer, len, 0);
5976			}
5977		}
5978
5979		/* move us one more past the eop_desc for start of next pkt */
5980		tx_buffer++;
5981		tx_desc++;
5982		i++;
5983		if (unlikely(!i)) {
5984			i -= tx_ring->count;
5985			tx_buffer = tx_ring->tx_buffer_info;
5986			tx_desc = IGB_TX_DESC(tx_ring, 0);
5987		}
5988
5989		/* issue prefetch for next Tx descriptor */
5990		prefetch(tx_desc);
5991
5992		/* update budget accounting */
5993		budget--;
5994	} while (likely(budget));
5995
5996	netdev_tx_completed_queue(txring_txq(tx_ring),
5997				  total_packets, total_bytes);
5998	i += tx_ring->count;
5999	tx_ring->next_to_clean = i;
6000	u64_stats_update_begin(&tx_ring->tx_syncp);
6001	tx_ring->tx_stats.bytes += total_bytes;
6002	tx_ring->tx_stats.packets += total_packets;
6003	u64_stats_update_end(&tx_ring->tx_syncp);
6004	q_vector->tx.total_bytes += total_bytes;
6005	q_vector->tx.total_packets += total_packets;
6006
6007	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
6008		struct e1000_hw *hw = &adapter->hw;
6009
6010		/* Detect a transmit hang in hardware; this serializes the
6011		 * check with the clearing of time_stamp and movement of i */
6012		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
6013		if (tx_buffer->next_to_watch &&
6014		    time_after(jiffies, tx_buffer->time_stamp +
6015			       (adapter->tx_timeout_factor * HZ)) &&
6016		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
6017
6018			/* detected Tx unit hang */
6019			dev_err(tx_ring->dev,
6020				"Detected Tx Unit Hang\n"
6021				"  Tx Queue             <%d>\n"
6022				"  TDH                  <%x>\n"
6023				"  TDT                  <%x>\n"
6024				"  next_to_use          <%x>\n"
6025				"  next_to_clean        <%x>\n"
6026				"buffer_info[next_to_clean]\n"
6027				"  time_stamp           <%lx>\n"
6028				"  next_to_watch        <%p>\n"
6029				"  jiffies              <%lx>\n"
6030				"  desc.status          <%x>\n",
6031				tx_ring->queue_index,
6032				rd32(E1000_TDH(tx_ring->reg_idx)),
6033				readl(tx_ring->tail),
6034				tx_ring->next_to_use,
6035				tx_ring->next_to_clean,
6036				tx_buffer->time_stamp,
6037				tx_buffer->next_to_watch,
6038				jiffies,
6039				tx_buffer->next_to_watch->wb.status);
6040			netif_stop_subqueue(tx_ring->netdev,
6041					    tx_ring->queue_index);
6042
6043			/* we are about to reset, no point in enabling stuff */
6044			return true;
6045		}
6046	}
6047
6048#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
6049	if (unlikely(total_packets &&
6050		     netif_carrier_ok(tx_ring->netdev) &&
6051		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
6052		/* Make sure that anybody stopping the queue after this
6053		 * sees the new next_to_clean.
6054		 */
6055		smp_mb();
6056		if (__netif_subqueue_stopped(tx_ring->netdev,
6057					     tx_ring->queue_index) &&
6058		    !(test_bit(__IGB_DOWN, &adapter->state))) {
6059			netif_wake_subqueue(tx_ring->netdev,
6060					    tx_ring->queue_index);
6061
6062			u64_stats_update_begin(&tx_ring->tx_syncp);
6063			tx_ring->tx_stats.restart_queue++;
6064			u64_stats_update_end(&tx_ring->tx_syncp);
6065		}
6066	}
6067
6068	return !!budget;
6069}
6070
6071/**
6072 * igb_reuse_rx_page - page flip buffer and store it back on the ring
6073 * @rx_ring: rx descriptor ring to store buffers on
6074 * @old_buff: donor buffer to have page reused
6075 *
6076 * Synchronizes page for reuse by the adapter
6077 **/
6078static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6079			      struct igb_rx_buffer *old_buff)
6080{
6081	struct igb_rx_buffer *new_buff;
6082	u16 nta = rx_ring->next_to_alloc;
6083
6084	new_buff = &rx_ring->rx_buffer_info[nta];
6085
6086	/* update, and store next to alloc */
6087	nta++;
6088	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6089
6090	/* transfer page from old buffer to new buffer */
6091	memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
6092
6093	/* sync the buffer for use by the device */
6094	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
6095					 old_buff->page_offset,
6096					 IGB_RX_BUFSZ,
6097					 DMA_FROM_DEVICE);
6098}
6099
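/* Decide whether the half-page just handed to the stack can be recycled:
 * the page must be local to this NUMA node and, when PAGE_SIZE < 8192,
 * we must be the only remaining owner.  On success the buffer offset is
 * flipped (or advanced) to the unused portion of the page.
 */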
6100static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6101				  struct page *page,
6102				  unsigned int truesize)
6103{
6104	/* avoid re-using remote pages */
6105	if (unlikely(page_to_nid(page) != numa_node_id()))
6106		return false;
6107
6108#if (PAGE_SIZE < 8192)
6109	/* if we are only owner of page we can reuse it */
6110	if (unlikely(page_count(page) != 1))
6111		return false;
6112
6113	/* flip page offset to other buffer */
6114	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6115
6116	/* since we are the only owner of the page and we need to
6117	 * increment it, just set the value to 2 in order to avoid
6118	 * an unnecessary locked operation
6119	 */
6120	atomic_set(&page->_count, 2);
6121#else
6122	/* move offset up to the next cache line */
6123	rx_buffer->page_offset += truesize;
6124
6125	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6126		return false;
6127
6128	/* bump ref count on page before it is given to the stack */
6129	get_page(page);
6130#endif
6131
6132	return true;
6133}
6134
6135/**
6136 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6137 * @rx_ring: rx descriptor ring to transact packets on
6138 * @rx_buffer: buffer containing page to add
6139 * @rx_desc: descriptor containing length of buffer written by hardware
6140 * @skb: sk_buff to place the data into
6141 *
6142 * This function will add the data contained in rx_buffer->page to the skb.
6143 * This is done either through a direct copy if the data in the buffer is
6144 * less than the skb header size, otherwise it will just attach the page as
6145 * a frag to the skb.
6146 *
6147 * The function will then update the page offset if necessary and return
6148 * true if the buffer can be reused by the adapter.
6149 **/
6150static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6151			    struct igb_rx_buffer *rx_buffer,
6152			    union e1000_adv_rx_desc *rx_desc,
6153			    struct sk_buff *skb)
6154{
6155	struct page *page = rx_buffer->page;
6156	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
6157#if (PAGE_SIZE < 8192)
6158	unsigned int truesize = IGB_RX_BUFSZ;
6159#else
6160	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
6161#endif
6162
6163	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
6164		unsigned char *va = page_address(page) + rx_buffer->page_offset;
6165
6166		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6167			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6168			va += IGB_TS_HDR_LEN;
6169			size -= IGB_TS_HDR_LEN;
6170		}
6171
6172		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6173
6174		/* we can reuse buffer as-is, just make sure it is local */
6175		if (likely(page_to_nid(page) == numa_node_id()))
6176			return true;
6177
6178		/* this page cannot be reused so discard it */
6179		put_page(page);
6180		return false;
6181	}
6182
6183	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
6184			rx_buffer->page_offset, size, truesize);
6185
6186	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
6187}
6188
6189static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6190					   union e1000_adv_rx_desc *rx_desc,
6191					   struct sk_buff *skb)
6192{
6193	struct igb_rx_buffer *rx_buffer;
6194	struct page *page;
6195
6196	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6197
6198	page = rx_buffer->page;
6199	prefetchw(page);
6200
6201	if (likely(!skb)) {
6202		void *page_addr = page_address(page) +
6203				  rx_buffer->page_offset;
6204
6205		/* prefetch first cache line of first page */
6206		prefetch(page_addr);
6207#if L1_CACHE_BYTES < 128
6208		prefetch(page_addr + L1_CACHE_BYTES);
6209#endif
6210
6211		/* allocate a skb to store the frags */
6212		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6213						IGB_RX_HDR_LEN);
6214		if (unlikely(!skb)) {
6215			rx_ring->rx_stats.alloc_failed++;
6216			return NULL;
6217		}
6218
6219		/*
6220		 * we will be copying header into skb->data in
6221		 * pskb_may_pull so it is in our interest to prefetch
6222		 * it now to avoid a possible cache miss
6223		 */
6224		prefetchw(skb->data);
6225	}
6226
6227	/* we are reusing so sync this buffer for CPU use */
6228	dma_sync_single_range_for_cpu(rx_ring->dev,
6229				      rx_buffer->dma,
6230				      rx_buffer->page_offset,
6231				      IGB_RX_BUFSZ,
6232				      DMA_FROM_DEVICE);
6233
6234	/* pull page into skb */
6235	if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
6236		/* hand second half of page back to the ring */
6237		igb_reuse_rx_page(rx_ring, rx_buffer);
6238	} else {
6239		/* we are not reusing the buffer so unmap it */
6240		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
6241			       PAGE_SIZE, DMA_FROM_DEVICE);
6242	}
6243
6244	/* clear contents of rx_buffer */
6245	rx_buffer->page = NULL;
6246
6247	return skb;
6248}
6249
6250static inline void igb_rx_checksum(struct igb_ring *ring,
6251				   union e1000_adv_rx_desc *rx_desc,
6252				   struct sk_buff *skb)
6253{
6254	skb_checksum_none_assert(skb);
6255
6256	/* Ignore Checksum bit is set */
6257	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
6258		return;
6259
6260	/* Rx checksum disabled via ethtool */
6261	if (!(ring->netdev->features & NETIF_F_RXCSUM))
6262		return;
6263
6264	/* TCP/UDP checksum error bit is set */
6265	if (igb_test_staterr(rx_desc,
6266			     E1000_RXDEXT_STATERR_TCPE |
6267			     E1000_RXDEXT_STATERR_IPE)) {
6268		/*
6269		 * work around an erratum with SCTP packets where the TCPE
6270		 * (aka L4E) bit is set incorrectly on 64 byte (60 byte w/o
6271		 * CRC) packets; in that case let the stack verify the crc32c
6272		 */
6273		if (!((skb->len == 60) &&
6274		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
6275			u64_stats_update_begin(&ring->rx_syncp);
6276			ring->rx_stats.csum_err++;
6277			u64_stats_update_end(&ring->rx_syncp);
6278		}
6279		/* let the stack verify checksum errors */
6280		return;
6281	}
6282	/* It must be a TCP or UDP packet with a valid checksum */
6283	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
6284				      E1000_RXD_STAT_UDPCS))
6285		skb->ip_summed = CHECKSUM_UNNECESSARY;
6286
6287	dev_dbg(ring->dev, "cksum success: bits %08X\n",
6288		le32_to_cpu(rx_desc->wb.upper.status_error));
6289}
6290
6291static inline void igb_rx_hash(struct igb_ring *ring,
6292			       union e1000_adv_rx_desc *rx_desc,
6293			       struct sk_buff *skb)
6294{
6295	if (ring->netdev->features & NETIF_F_RXHASH)
6296		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
6297}
6298
6299/**
6300 * igb_is_non_eop - process handling of non-EOP buffers
6301 * @rx_ring: Rx ring being processed
6302 * @rx_desc: Rx descriptor for current buffer
6303 *
6304 * This function updates next to clean.  If the buffer is an EOP buffer
6305 * this function exits returning false, otherwise it returns true
6306 * indicating that this is in fact a non-EOP buffer and the caller
6307 * should keep fetching buffers for the current frame.
6308 **/
6310static bool igb_is_non_eop(struct igb_ring *rx_ring,
6311			   union e1000_adv_rx_desc *rx_desc)
6312{
6313	u32 ntc = rx_ring->next_to_clean + 1;
6314
6315	/* fetch, update, and store next to clean */
6316	ntc = (ntc < rx_ring->count) ? ntc : 0;
6317	rx_ring->next_to_clean = ntc;
6318
6319	prefetch(IGB_RX_DESC(rx_ring, ntc));
6320
6321	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
6322		return false;
6323
6324	return true;
6325}
6326
6327/**
6328 * igb_get_headlen - determine size of header for LRO/GRO
6329 * @data: pointer to the start of the headers
6330 * @max_len: total length of section to find headers in
6331 *
6332 * This function is meant to determine the length of headers that will
6333 * be recognized by hardware for LRO, and GRO offloads.  The main
6334 * motivation of doing this is to only perform one pull for IPv4 TCP
6335 * packets so that we can do basic things like calculating the gso_size
6336 * based on the average data per packet.
6337 **/
6338static unsigned int igb_get_headlen(unsigned char *data,
6339				    unsigned int max_len)
6340{
6341	union {
6342		unsigned char *network;
6343		/* l2 headers */
6344		struct ethhdr *eth;
6345		struct vlan_hdr *vlan;
6346		/* l3 headers */
6347		struct iphdr *ipv4;
6348		struct ipv6hdr *ipv6;
6349	} hdr;
6350	__be16 protocol;
6351	u8 nexthdr = 0;	/* default to not TCP */
6352	u8 hlen;
6353
6354	/* this should never happen, but better safe than sorry */
6355	if (max_len < ETH_HLEN)
6356		return max_len;
6357
6358	/* initialize network frame pointer */
6359	hdr.network = data;
6360
6361	/* set first protocol and move network header forward */
6362	protocol = hdr.eth->h_proto;
6363	hdr.network += ETH_HLEN;
6364
6365	/* handle any vlan tag if present */
6366	if (protocol == __constant_htons(ETH_P_8021Q)) {
6367		if ((hdr.network - data) > (max_len - VLAN_HLEN))
6368			return max_len;
6369
6370		protocol = hdr.vlan->h_vlan_encapsulated_proto;
6371		hdr.network += VLAN_HLEN;
6372	}
6373
6374	/* handle L3 protocols */
6375	if (protocol == __constant_htons(ETH_P_IP)) {
6376		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6377			return max_len;
6378
6379		/* access ihl as a u8 to avoid unaligned access on ia64 */
6380		hlen = (hdr.network[0] & 0x0F) << 2;
6381
6382		/* verify hlen meets minimum size requirements */
6383		if (hlen < sizeof(struct iphdr))
6384			return hdr.network - data;
6385
6386		/* record next protocol if header is present */
6387		if (!hdr.ipv4->frag_off)
6388			nexthdr = hdr.ipv4->protocol;
6389	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
6390		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6391			return max_len;
6392
6393		/* record next protocol */
6394		nexthdr = hdr.ipv6->nexthdr;
6395		hlen = sizeof(struct ipv6hdr);
6396	} else {
6397		return hdr.network - data;
6398	}
6399
6400	/* relocate pointer to start of L4 header */
6401	hdr.network += hlen;
6402
6403	/* finally sort out TCP */
6404	if (nexthdr == IPPROTO_TCP) {
6405		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
6406			return max_len;
6407
6408		/* access doff as a u8 to avoid unaligned access on ia64 */
6409		hlen = (hdr.network[12] & 0xF0) >> 2;
6410
6411		/* verify hlen meets minimum size requirements */
6412		if (hlen < sizeof(struct tcphdr))
6413			return hdr.network - data;
6414
6415		hdr.network += hlen;
6416	} else if (nexthdr == IPPROTO_UDP) {
6417		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
6418			return max_len;
6419
6420		hdr.network += sizeof(struct udphdr);
6421	}
6422
6423	/*
6424	 * If everything has gone correctly hdr.network should be the
6425	 * data section of the packet and will be the end of the header.
6426	 * If not then it probably represents the end of the last recognized
6427	 * header.
6428	 */
6429	if ((hdr.network - data) < max_len)
6430		return hdr.network - data;
6431	else
6432		return max_len;
6433}
6434
6435/**
6436 * igb_pull_tail - igb specific version of skb_pull_tail
6437 * @rx_ring: rx descriptor ring packet is being transacted on
6438 * @rx_desc: pointer to the EOP Rx descriptor
6439 * @skb: pointer to current skb being adjusted
6440 *
6441 * This function is an igb specific version of __pskb_pull_tail.  The
6442 * main difference between this version and the original function is that
6443 * this function can make several assumptions about the state of things
6444 * that allow for significant optimizations versus the standard function.
6445 * As a result we can do things like drop a frag and maintain an accurate
6446 * truesize for the skb.
6447 */
6448static void igb_pull_tail(struct igb_ring *rx_ring,
6449			  union e1000_adv_rx_desc *rx_desc,
6450			  struct sk_buff *skb)
6451{
6452	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
6453	unsigned char *va;
6454	unsigned int pull_len;
6455
6456	/*
6457	 * it is valid to use page_address instead of kmap since we are
6458	 * working with pages allocated out of the low memory pool per
6459	 * alloc_page(GFP_ATOMIC)
6460	 */
6461	va = skb_frag_address(frag);
6462
6463	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6464		/* retrieve timestamp from buffer */
6465		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6466
6467		/* update pointers to remove timestamp header */
6468		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
6469		frag->page_offset += IGB_TS_HDR_LEN;
6470		skb->data_len -= IGB_TS_HDR_LEN;
6471		skb->len -= IGB_TS_HDR_LEN;
6472
6473		/* move va to start of packet data */
6474		va += IGB_TS_HDR_LEN;
6475	}
6476
6477	/*
6478	 * we need the linear header to contain at least ETH_HLEN bytes, or
6479	 * the whole frame when skb->len is under the 60 bytes skb_pad() needs
6480	 */
6481	pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
6482
6483	/* align pull length to size of long to optimize memcpy performance */
6484	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
6485
6486	/* update all of the pointers */
6487	skb_frag_size_sub(frag, pull_len);
6488	frag->page_offset += pull_len;
6489	skb->data_len -= pull_len;
6490	skb->tail += pull_len;
6491}
6492
6493/**
6494 * igb_cleanup_headers - Correct corrupted or empty headers
6495 * @rx_ring: rx descriptor ring packet is being transacted on
6496 * @rx_desc: pointer to the EOP Rx descriptor
6497 * @skb: pointer to current skb being fixed
6498 *
6499 * Address the case where we are pulling data in on pages only
6500 * and as such no data is present in the skb header.
6501 *
6502 * In addition if skb is not at least 60 bytes we need to pad it so that
6503 * it is large enough to qualify as a valid Ethernet frame.
6504 *
6505 * Returns true if an error was encountered and skb was freed.
6506 **/
6507static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6508				union e1000_adv_rx_desc *rx_desc,
6509				struct sk_buff *skb)
6510{
6511
6512	if (unlikely((igb_test_staterr(rx_desc,
6513				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6514		struct net_device *netdev = rx_ring->netdev;
6515		if (!(netdev->features & NETIF_F_RXALL)) {
6516			dev_kfree_skb_any(skb);
6517			return true;
6518		}
6519	}
6520
6521	/* place header in linear portion of buffer */
6522	if (skb_is_nonlinear(skb))
6523		igb_pull_tail(rx_ring, rx_desc, skb);
6524
6525	/* if skb_pad returns an error the skb was freed */
6526	if (unlikely(skb->len < 60)) {
6527		int pad_len = 60 - skb->len;
6528
6529		if (skb_pad(skb, pad_len))
6530			return true;
6531		__skb_put(skb, pad_len);
6532	}
6533
6534	return false;
6535}
6536
6537/**
6538 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
6539 * @rx_ring: rx descriptor ring packet is being transacted on
6540 * @rx_desc: pointer to the EOP Rx descriptor
6541 * @skb: pointer to current skb being populated
6542 *
6543 * This function checks the ring, descriptor, and packet information in
6544 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6545 * other fields within the skb.
6546 **/
6547static void igb_process_skb_fields(struct igb_ring *rx_ring,
6548				   union e1000_adv_rx_desc *rx_desc,
6549				   struct sk_buff *skb)
6550{
6551	struct net_device *dev = rx_ring->netdev;
6552
6553	igb_rx_hash(rx_ring, rx_desc, skb);
6554
6555	igb_rx_checksum(rx_ring, rx_desc, skb);
6556
6557	igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
6558
6559	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
6560	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6561		u16 vid;
6562		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6563		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6564			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6565		else
6566			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6567
6568		__vlan_hwaccel_put_tag(skb, vid);
6569	}
6570
6571	skb_record_rx_queue(skb, rx_ring->queue_index);
6572
6573	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6574}
6575
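/**
 * igb_clean_rx_irq - clean completed descriptors from the Rx ring
 * @q_vector: vector the Rx ring is attached to
 * @budget: NAPI budget, i.e. the maximum number of packets to process
 *
 * Returns true if the budget was not exhausted and polling may stop.
 **/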
6576static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6577{
6578	struct igb_ring *rx_ring = q_vector->rx.ring;
6579	struct sk_buff *skb = rx_ring->skb;
6580	unsigned int total_bytes = 0, total_packets = 0;
6581	u16 cleaned_count = igb_desc_unused(rx_ring);
6582
6583	do {
6584		union e1000_adv_rx_desc *rx_desc;
6585
6586		/* return some buffers to hardware, one at a time is too slow */
6587		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
6588			igb_alloc_rx_buffers(rx_ring, cleaned_count);
6589			cleaned_count = 0;
6590		}
6591
6592		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
6593
6594		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
6595			break;
6596
6597		/* This memory barrier is needed to keep us from reading
6598		 * any other fields out of the rx_desc until we know the
6599		 * RXD_STAT_DD bit is set
6600		 */
6601		rmb();
6602
6603		/* retrieve a buffer from the ring */
6604		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
6605
6606		/* exit if we failed to retrieve a buffer */
6607		if (!skb)
6608			break;
6609
6610		cleaned_count++;
6611
6612		/* fetch next buffer in frame if non-eop */
6613		if (igb_is_non_eop(rx_ring, rx_desc))
6614			continue;
6615
6616		/* verify the packet layout is correct */
6617		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
6618			skb = NULL;
6619			continue;
6620		}
6621
6622		/* probably a little skewed due to removing CRC */
6623		total_bytes += skb->len;
6624
6625		/* populate checksum, timestamp, VLAN, and protocol */
6626		igb_process_skb_fields(rx_ring, rx_desc, skb);
6627
6628		napi_gro_receive(&q_vector->napi, skb);
6629
6630		/* reset skb pointer */
6631		skb = NULL;
6632
6633		/* update budget accounting */
6634		total_packets++;
6635	} while (likely(total_packets < budget));
6636
6637	/* place incomplete frames back on ring for completion */
6638	rx_ring->skb = skb;
6639
6640	u64_stats_update_begin(&rx_ring->rx_syncp);
6641	rx_ring->rx_stats.packets += total_packets;
6642	rx_ring->rx_stats.bytes += total_bytes;
6643	u64_stats_update_end(&rx_ring->rx_syncp);
6644	q_vector->rx.total_packets += total_packets;
6645	q_vector->rx.total_bytes += total_bytes;
6646
6647	if (cleaned_count)
6648		igb_alloc_rx_buffers(rx_ring, cleaned_count);
6649
6650	return (total_packets < budget);
6651}
6652
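/* Make sure @bi owns a DMA-mapped page, allocating and mapping a fresh
 * one only when the previous page could not be recycled.
 */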
6653static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6654				  struct igb_rx_buffer *bi)
6655{
6656	struct page *page = bi->page;
6657	dma_addr_t dma;
6658
6659	/* since we are recycling buffers we should seldom need to alloc */
6660	if (likely(page))
6661		return true;
6662
6663	/* alloc new page for storage */
6664	page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
6665	if (unlikely(!page)) {
6666		rx_ring->rx_stats.alloc_failed++;
6667		return false;
6668	}
6669
6670	/* map page for use */
6671	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
6672
6673	/*
6674	 * if mapping failed free memory back to system since
6675	 * there isn't much point in holding memory we can't use
6676	 */
6677	if (dma_mapping_error(rx_ring->dev, dma)) {
6678		__free_page(page);
6679
6680		rx_ring->rx_stats.alloc_failed++;
6681		return false;
6682	}
6683
6684	bi->dma = dma;
6685	bi->page = page;
6686	bi->page_offset = 0;
6687
6688	return true;
6689}
6690
6691/**
6692 * igb_alloc_rx_buffers - Replace used receive buffers
6693 * @rx_ring: rx descriptor ring to refill with @cleaned_count new buffers
6694 **/
6695void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6696{
6697	union e1000_adv_rx_desc *rx_desc;
6698	struct igb_rx_buffer *bi;
6699	u16 i = rx_ring->next_to_use;
6700
6701	/* nothing to do */
6702	if (!cleaned_count)
6703		return;
6704
6705	rx_desc = IGB_RX_DESC(rx_ring, i);
6706	bi = &rx_ring->rx_buffer_info[i];
6707	i -= rx_ring->count;
6708
6709	do {
6710		if (!igb_alloc_mapped_page(rx_ring, bi))
6711			break;
6712
6713		/*
6714		 * Refresh the desc even if buffer_addrs didn't change
6715		 * because each write-back erases this info.
6716		 */
6717		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
6718
6719		rx_desc++;
6720		bi++;
6721		i++;
6722		if (unlikely(!i)) {
6723			rx_desc = IGB_RX_DESC(rx_ring, 0);
6724			bi = rx_ring->rx_buffer_info;
6725			i -= rx_ring->count;
6726		}
6727
6728		/* clear the hdr_addr for the next_to_use descriptor */
6729		rx_desc->read.hdr_addr = 0;
6730
6731		cleaned_count--;
6732	} while (cleaned_count);
6733
6734	i += rx_ring->count;
6735
6736	if (rx_ring->next_to_use != i) {
6737		/* record the next descriptor to use */
6738		rx_ring->next_to_use = i;
6739
6740		/* update next to alloc since we have filled the ring */
6741		rx_ring->next_to_alloc = i;
6742
6743		/*
6744		 * Force memory writes to complete before letting h/w
6745		 * know there are new descriptors to fetch.  (Only
6746		 * applicable for weak-ordered memory model archs,
6747		 * such as IA-64).
6748		 */
6749		wmb();
6750		writel(i, rx_ring->tail);
6751	}
6752}
6753
6754/**
6755 * igb_mii_ioctl - handle MII related ioctls
6756 * @netdev: network interface device structure
6757 * @ifr: interface request structure carrying the MII data
6758 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
6759 **/
6760static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6761{
6762	struct igb_adapter *adapter = netdev_priv(netdev);
6763	struct mii_ioctl_data *data = if_mii(ifr);
6764
6765	if (adapter->hw.phy.media_type != e1000_media_type_copper)
6766		return -EOPNOTSUPP;
6767
6768	switch (cmd) {
6769	case SIOCGMIIPHY:
6770		data->phy_id = adapter->hw.phy.addr;
6771		break;
6772	case SIOCGMIIREG:
6773		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6774		                     &data->val_out))
6775			return -EIO;
6776		break;
6777	case SIOCSMIIREG:
6778	default:
6779		return -EOPNOTSUPP;
6780	}
6781	return 0;
6782}
6783
6784/**
6785 * igb_ioctl - dispatch device specific ioctls
6786 * @netdev: network interface device structure
6787 * @ifr: interface request structure
6788 * @cmd: ioctl command
6789 **/
6790static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6791{
6792	switch (cmd) {
6793	case SIOCGMIIPHY:
6794	case SIOCGMIIREG:
6795	case SIOCSMIIREG:
6796		return igb_mii_ioctl(netdev, ifr, cmd);
6797	case SIOCSHWTSTAMP:
6798		return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
6799	default:
6800		return -EOPNOTSUPP;
6801	}
6802}
6803
6804s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6805{
6806	struct igb_adapter *adapter = hw->back;
6807
6808	if (pcie_capability_read_word(adapter->pdev, reg, value))
6809		return -E1000_ERR_CONFIG;
6810
6811	return 0;
6812}
6813
6814s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6815{
6816	struct igb_adapter *adapter = hw->back;
6817
6818	if (pcie_capability_write_word(adapter->pdev, reg, *value))
6819		return -E1000_ERR_CONFIG;
6820
6821	return 0;
6822}
6823
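/**
 * igb_vlan_mode - enable or disable hardware VLAN tag stripping
 * @netdev: network interface device structure
 * @features: netdev feature set; NETIF_F_HW_VLAN_RX selects stripping
 **/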
6824static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
6825{
6826	struct igb_adapter *adapter = netdev_priv(netdev);
6827	struct e1000_hw *hw = &adapter->hw;
6828	u32 ctrl, rctl;
6829	bool enable = !!(features & NETIF_F_HW_VLAN_RX);
6830
6831	if (enable) {
6832		/* enable VLAN tag insert/strip */
6833		ctrl = rd32(E1000_CTRL);
6834		ctrl |= E1000_CTRL_VME;
6835		wr32(E1000_CTRL, ctrl);
6836
6837		/* Disable CFI check */
6838		rctl = rd32(E1000_RCTL);
6839		rctl &= ~E1000_RCTL_CFIEN;
6840		wr32(E1000_RCTL, rctl);
6841	} else {
6842		/* disable VLAN tag insert/strip */
6843		ctrl = rd32(E1000_CTRL);
6844		ctrl &= ~E1000_CTRL_VME;
6845		wr32(E1000_CTRL, ctrl);
6846	}
6847
6848	igb_rlpml_set(adapter);
6849}
6850
6851static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6852{
6853	struct igb_adapter *adapter = netdev_priv(netdev);
6854	struct e1000_hw *hw = &adapter->hw;
6855	int pf_id = adapter->vfs_allocated_count;
6856
6857	/* attempt to add filter to vlvf array */
6858	igb_vlvf_set(adapter, vid, true, pf_id);
6859
6860	/* add the filter since PF can receive vlans w/o entry in vlvf */
6861	igb_vfta_set(hw, vid, true);
6862
6863	set_bit(vid, adapter->active_vlans);
6864
6865	return 0;
6866}
6867
6868static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
6869{
6870	struct igb_adapter *adapter = netdev_priv(netdev);
6871	struct e1000_hw *hw = &adapter->hw;
6872	int pf_id = adapter->vfs_allocated_count;
6873	s32 err;
6874
6875	/* remove vlan from VLVF table array */
6876	err = igb_vlvf_set(adapter, vid, false, pf_id);
6877
6878	/* if vid was not present in VLVF just remove it from table */
6879	if (err)
6880		igb_vfta_set(hw, vid, false);
6881
6882	clear_bit(vid, adapter->active_vlans);
6883
6884	return 0;
6885}
6886
6887static void igb_restore_vlan(struct igb_adapter *adapter)
6888{
6889	u16 vid;
6890
6891	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6892
6893	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6894		igb_vlan_rx_add_vid(adapter->netdev, vid);
6895}
6896
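/**
 * igb_set_spd_dplx - force a speed/duplex setting requested via ethtool
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dplx: requested duplex (DUPLEX_HALF or DUPLEX_FULL)
 **/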
6897int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
6898{
6899	struct pci_dev *pdev = adapter->pdev;
6900	struct e1000_mac_info *mac = &adapter->hw.mac;
6901
6902	mac->autoneg = 0;
6903
6904	/* Make sure dplx is at most 1 bit and lsb of speed is not set
6905	 * for the switch() below to work */
6906	if ((spd & 1) || (dplx & ~1))
6907		goto err_inval;
6908
6909	/* Fiber NICs only allow 1000 Mbps full duplex */
6910	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
6911	    (spd != SPEED_1000 ||
6912	     dplx != DUPLEX_FULL))
6913		goto err_inval;
6914
6915	switch (spd + dplx) {
6916	case SPEED_10 + DUPLEX_HALF:
6917		mac->forced_speed_duplex = ADVERTISE_10_HALF;
6918		break;
6919	case SPEED_10 + DUPLEX_FULL:
6920		mac->forced_speed_duplex = ADVERTISE_10_FULL;
6921		break;
6922	case SPEED_100 + DUPLEX_HALF:
6923		mac->forced_speed_duplex = ADVERTISE_100_HALF;
6924		break;
6925	case SPEED_100 + DUPLEX_FULL:
6926		mac->forced_speed_duplex = ADVERTISE_100_FULL;
6927		break;
6928	case SPEED_1000 + DUPLEX_FULL:
6929		mac->autoneg = 1;
6930		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6931		break;
6932	case SPEED_1000 + DUPLEX_HALF: /* not supported */
6933	default:
6934		goto err_inval;
6935	}
6936
6937	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6938	adapter->hw.phy.mdix = AUTO_ALL_MODES;
6939
6940	return 0;
6941
6942err_inval:
6943	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6944	return -EINVAL;
6945}
6946
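/* Common suspend/shutdown path: detach the interface, program the wake-up
 * filter control (WUFC) when wake is requested, and release the hardware
 * back to firmware.  *enable_wake tells the caller whether to arm PME.
 */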
6947static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
6948			  bool runtime)
6949{
6950	struct net_device *netdev = pci_get_drvdata(pdev);
6951	struct igb_adapter *adapter = netdev_priv(netdev);
6952	struct e1000_hw *hw = &adapter->hw;
6953	u32 ctrl, rctl, status;
6954	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
6955#ifdef CONFIG_PM
6956	int retval = 0;
6957#endif
6958
6959	netif_device_detach(netdev);
6960
6961	if (netif_running(netdev))
6962		__igb_close(netdev, true);
6963
6964	igb_clear_interrupt_scheme(adapter);
6965
6966#ifdef CONFIG_PM
6967	retval = pci_save_state(pdev);
6968	if (retval)
6969		return retval;
6970#endif
6971
6972	status = rd32(E1000_STATUS);
6973	if (status & E1000_STATUS_LU)
6974		wufc &= ~E1000_WUFC_LNKC;
6975
6976	if (wufc) {
6977		igb_setup_rctl(adapter);
6978		igb_set_rx_mode(netdev);
6979
6980		/* turn on all-multi mode if wake on multicast is enabled */
6981		if (wufc & E1000_WUFC_MC) {
6982			rctl = rd32(E1000_RCTL);
6983			rctl |= E1000_RCTL_MPE;
6984			wr32(E1000_RCTL, rctl);
6985		}
6986
6987		ctrl = rd32(E1000_CTRL);
6988		/* advertise wake from D3Cold */
6989		#define E1000_CTRL_ADVD3WUC 0x00100000
6990		/* phy power management enable */
6991		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6992		ctrl |= E1000_CTRL_ADVD3WUC;
6993		wr32(E1000_CTRL, ctrl);
6994
6995		/* Allow time for pending master requests to run */
6996		igb_disable_pcie_master(hw);
6997
6998		wr32(E1000_WUC, E1000_WUC_PME_EN);
6999		wr32(E1000_WUFC, wufc);
7000	} else {
7001		wr32(E1000_WUC, 0);
7002		wr32(E1000_WUFC, 0);
7003	}
7004
7005	*enable_wake = wufc || adapter->en_mng_pt;
7006	if (!*enable_wake)
7007		igb_power_down_link(adapter);
7008	else
7009		igb_power_up_link(adapter);
7010
7011	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
7012	 * would have already happened in close and is redundant. */
7013	igb_release_hw_control(adapter);
7014
7015	pci_disable_device(pdev);
7016
7017	return 0;
7018}
7019
7020#ifdef CONFIG_PM
7021#ifdef CONFIG_PM_SLEEP
7022static int igb_suspend(struct device *dev)
7023{
7024	int retval;
7025	bool wake;
7026	struct pci_dev *pdev = to_pci_dev(dev);
7027
7028	retval = __igb_shutdown(pdev, &wake, 0);
7029	if (retval)
7030		return retval;
7031
7032	if (wake) {
7033		pci_prepare_to_sleep(pdev);
7034	} else {
7035		pci_wake_from_d3(pdev, false);
7036		pci_set_power_state(pdev, PCI_D3hot);
7037	}
7038
7039	return 0;
7040}
7041#endif /* CONFIG_PM_SLEEP */
7042
7043static int igb_resume(struct device *dev)
7044{
7045	struct pci_dev *pdev = to_pci_dev(dev);
7046	struct net_device *netdev = pci_get_drvdata(pdev);
7047	struct igb_adapter *adapter = netdev_priv(netdev);
7048	struct e1000_hw *hw = &adapter->hw;
7049	int err;
7050
7051	pci_set_power_state(pdev, PCI_D0);
7052	pci_restore_state(pdev);
7053	pci_save_state(pdev);
7054
7055	err = pci_enable_device_mem(pdev);
7056	if (err) {
7057		dev_err(&pdev->dev,
7058			"igb: Cannot enable PCI device from suspend\n");
7059		return err;
7060	}
7061	pci_set_master(pdev);
7062
7063	pci_enable_wake(pdev, PCI_D3hot, 0);
7064	pci_enable_wake(pdev, PCI_D3cold, 0);
7065
7066	if (igb_init_interrupt_scheme(adapter, true)) {
7067		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7068		return -ENOMEM;
7069	}
7070
7071	igb_reset(adapter);
7072
7073	/* let the f/w know that the h/w is now under the control of the
7074	 * driver. */
7075	igb_get_hw_control(adapter);
7076
7077	wr32(E1000_WUS, ~0);
7078
7079	if (netdev->flags & IFF_UP) {
7080		rtnl_lock();
7081		err = __igb_open(netdev, true);
7082		rtnl_unlock();
7083		if (err)
7084			return err;
7085	}
7086
7087	netif_device_attach(netdev);
7088	return 0;
7089}
7090
7091#ifdef CONFIG_PM_RUNTIME
7092static int igb_runtime_idle(struct device *dev)
7093{
7094	struct pci_dev *pdev = to_pci_dev(dev);
7095	struct net_device *netdev = pci_get_drvdata(pdev);
7096	struct igb_adapter *adapter = netdev_priv(netdev);
7097
7098	if (!igb_has_link(adapter))
7099		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
7100
7101	return -EBUSY;
7102}
7103
7104static int igb_runtime_suspend(struct device *dev)
7105{
7106	struct pci_dev *pdev = to_pci_dev(dev);
7107	int retval;
7108	bool wake;
7109
7110	retval = __igb_shutdown(pdev, &wake, 1);
7111	if (retval)
7112		return retval;
7113
7114	if (wake) {
7115		pci_prepare_to_sleep(pdev);
7116	} else {
7117		pci_wake_from_d3(pdev, false);
7118		pci_set_power_state(pdev, PCI_D3hot);
7119	}
7120
7121	return 0;
7122}
7123
7124static int igb_runtime_resume(struct device *dev)
7125{
7126	return igb_resume(dev);
7127}
7128#endif /* CONFIG_PM_RUNTIME */
7129#endif
7130
7131static void igb_shutdown(struct pci_dev *pdev)
7132{
7133	bool wake;
7134
7135	__igb_shutdown(pdev, &wake, 0);
7136
7137	if (system_state == SYSTEM_POWER_OFF) {
7138		pci_wake_from_d3(pdev, wake);
7139		pci_set_power_state(pdev, PCI_D3hot);
7140	}
7141}
7142
7143#ifdef CONFIG_PCI_IOV
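/* Rebuild the interrupt and queue layout after the VF count changes,
 * closing and reopening the interface around the rework.
 */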
7144static int igb_sriov_reinit(struct pci_dev *dev)
7145{
7146	struct net_device *netdev = pci_get_drvdata(dev);
7147	struct igb_adapter *adapter = netdev_priv(netdev);
7148	struct pci_dev *pdev = adapter->pdev;
7149
7150	rtnl_lock();
7151
7152	if (netif_running(netdev))
7153		igb_close(netdev);
7154
7155	igb_clear_interrupt_scheme(adapter);
7156
7157	igb_init_queue_configuration(adapter);
7158
7159	if (igb_init_interrupt_scheme(adapter, true)) {
7160		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7161		return -ENOMEM;
7162	}
7163
7164	if (netif_running(netdev))
7165		igb_open(netdev);
7166
7167	rtnl_unlock();
7168
7169	return 0;
7170}
7171
7172static int igb_pci_disable_sriov(struct pci_dev *dev)
7173{
7174	int err = igb_disable_sriov(dev);
7175
7176	if (!err)
7177		err = igb_sriov_reinit(dev);
7178
7179	return err;
7180}
7181
7182static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
7183{
7184	int err = igb_enable_sriov(dev, num_vfs);
7185
7186	if (err)
7187		goto out;
7188
7189	err = igb_sriov_reinit(dev);
7190	if (!err)
7191		return num_vfs;
7192
7193out:
7194	return err;
7195}
7196
7197#endif
7198static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7199{
7200#ifdef CONFIG_PCI_IOV
7201	if (num_vfs == 0)
7202		return igb_pci_disable_sriov(dev);
7203	else
7204		return igb_pci_enable_sriov(dev, num_vfs);
7205#endif
7206	return 0;
7207}
7208
7209#ifdef CONFIG_NET_POLL_CONTROLLER
7210/*
7211 * Polling 'interrupt' - used by things like netconsole to send skbs
7212 * without having to re-enable interrupts. It's not called while
7213 * the interrupt routine is executing.
7214 */
7215static void igb_netpoll(struct net_device *netdev)
7216{
7217	struct igb_adapter *adapter = netdev_priv(netdev);
7218	struct e1000_hw *hw = &adapter->hw;
7219	struct igb_q_vector *q_vector;
7220	int i;
7221
7222	for (i = 0; i < adapter->num_q_vectors; i++) {
7223		q_vector = adapter->q_vector[i];
7224		if (adapter->msix_entries)
7225			wr32(E1000_EIMC, q_vector->eims_value);
7226		else
7227			igb_irq_disable(adapter);
7228		napi_schedule(&q_vector->napi);
7229	}
7230}
7231#endif /* CONFIG_NET_POLL_CONTROLLER */
7232
7233/**
7234 * igb_io_error_detected - called when PCI error is detected
7235 * @pdev: Pointer to PCI device
7236 * @state: The current pci connection state
7237 *
7238 * This function is called after a PCI bus error affecting
7239 * this device has been detected.
7240 */
7241static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7242					      pci_channel_state_t state)
7243{
7244	struct net_device *netdev = pci_get_drvdata(pdev);
7245	struct igb_adapter *adapter = netdev_priv(netdev);
7246
7247	netif_device_detach(netdev);
7248
7249	if (state == pci_channel_io_perm_failure)
7250		return PCI_ERS_RESULT_DISCONNECT;
7251
7252	if (netif_running(netdev))
7253		igb_down(adapter);
7254	pci_disable_device(pdev);
7255
7256	/* Request a slot reset. */
7257	return PCI_ERS_RESULT_NEED_RESET;
7258}
7259
7260/**
7261 * igb_io_slot_reset - called after the pci bus has been reset.
7262 * @pdev: Pointer to PCI device
7263 *
7264 * Restart the card from scratch, as if from a cold-boot. Implementation
7265 * resembles the first-half of the igb_resume routine.
7266 */
7267static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7268{
7269	struct net_device *netdev = pci_get_drvdata(pdev);
7270	struct igb_adapter *adapter = netdev_priv(netdev);
7271	struct e1000_hw *hw = &adapter->hw;
7272	pci_ers_result_t result;
7273	int err;
7274
7275	if (pci_enable_device_mem(pdev)) {
7276		dev_err(&pdev->dev,
7277			"Cannot re-enable PCI device after reset.\n");
7278		result = PCI_ERS_RESULT_DISCONNECT;
7279	} else {
7280		pci_set_master(pdev);
7281		pci_restore_state(pdev);
7282		pci_save_state(pdev);
7283
7284		pci_enable_wake(pdev, PCI_D3hot, 0);
7285		pci_enable_wake(pdev, PCI_D3cold, 0);
7286
7287		igb_reset(adapter);
7288		wr32(E1000_WUS, ~0);
7289		result = PCI_ERS_RESULT_RECOVERED;
7290	}
7291
7292	err = pci_cleanup_aer_uncorrect_error_status(pdev);
7293	if (err) {
7294		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
7295		        "failed 0x%0x\n", err);
7296		/* non-fatal, continue */
7297	}
7298
7299	return result;
7300}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
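
/* Worked example of the RAL/RAH packing above (address illustrative):
 * for MAC 00:1b:21:aa:bb:cc, addr[0..5] = {0x00, 0x1b, 0x21, 0xaa,
 * 0xbb, 0xcc}, so
 *
 *	rar_low  = 0x00 | 0x1b << 8 | 0x21 << 16 | 0xaa << 24 = 0xaa211b00
 *	rar_high = 0xbb | 0xcc << 8                           = 0x0000ccbb
 *
 * i.e. the first four octets land in RAL least-significant byte first,
 * and the remaining two in the low half of RAH before the valid and
 * pool-select bits are OR'd in.
 */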

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address
	 * registers and move towards the first; as a result a collision
	 * should not be possible.
	 */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev,
		 "Reload the VF driver to make this change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF MAC address has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}
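
/* The ndo_set_vf_mac hook above is reached through rtnetlink; from
 * userspace the equivalent operation is (interface name illustrative):
 *
 *	ip link set eth0 vf 0 mac 52:54:00:12:34:56
 *
 * IGB_VF_FLAG_PF_SET_MAC marks the address as administratively set so
 * the VF cannot later override it with a mailbox request of its own.
 */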

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(E1000_RTTBCNRM, 0x14);
	wr32(E1000_RTTBCNRC, bcnrc_val);
}
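
/* Worked example of the rate factor math above, assuming the usual
 * E1000_RTTBCNRC_RF_INT_SHIFT of 14 (numbers illustrative): with
 * link_speed = 1000 Mbps and tx_rate = 300 Mbps,
 *
 *	rf_int = 1000 / 300                    = 3
 *	rf_dec = (1000 - 3 * 300) * 2^14 / 300 = 5461
 *
 * so the programmed factor is 3 + 5461/16384 ~= 3.333 = 1000/300,
 * i.e. RTTBCNRC holds link_speed/tx_rate as a 14-bit fixed-point
 * fraction and the hardware paces the VF's queue at 1/factor of the
 * line rate.
 */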

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

	return 0;
}
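
/* Like the other ndo_set_vf_* hooks, this is driven from rtnetlink;
 * limiting VF 0 to 300 Mbps looks like (interface name illustrative):
 *
 *	ip link set eth0 vf 0 rate 300
 *
 * Note the -EOPNOTSUPP above: per-VF TX rate limiting is only wired up
 * for 82576 here, and the requested rate must not exceed the current
 * link speed, which is why link-up (E1000_STATUS_LU) is checked.
 */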

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}
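
/* The ifla_vf_info filled in above is what rtnetlink reports per VF,
 * so settings made through the hooks in this file show up in e.g.
 * `ip link show eth0` as one "vf N MAC ..., vlan ..., tx rate ..."
 * line per allocated VF (interface name illustrative).
 */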

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/* DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = 1000 usec, in 32 usec intervals */
			reg |= (1000 >> 5);

			/* Disable BMC-to-OS Watchdog Enable */
			reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable
			 * coalescing (smart fifo) - UTRESH = 0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/* free space in tx packet buffer to wake from
			 * DMA coalescing
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/* make low power state decision controlled
			 * by DMA coalescing
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);

		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}
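
/* Worked example for the watermark math in igb_init_dmac(), with
 * illustrative values: for pba = 34 (KB of packet buffer) and
 * max_frame_size = 1522,
 *
 *	hwm      = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081 (16B units)
 *	floor    = 64 * (34 - 6)       = 1792, so hwm stays 2081
 *	dmac_thr = 34 - 1522 / 512     = 34 - 2   = 32 (KB units)
 *	floor    = 34 - 10             = 24, so dmac_thr stays 32
 *
 * i.e. hwm counts 16-byte chunks (64 per KB) while dmac_thr counts
 * whole KB, and both are clamped so they never fall more than 6KB
 * (resp. 10KB) below the packet buffer size.
 */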

/**
 *  igb_read_i2c_byte - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: device address
 *  @data: value read
 *
 *  Performs byte read operation over I2C interface at
 *  a specified device address.
 */
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		      u8 dev_addr, u8 *data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = 0;

	if (!this_client)
		return E1000_ERR_I2C;

	swfw_mask = E1000_SWFW_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
	    != E1000_SUCCESS)
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_read_byte_data(this_client, byte_offset);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return E1000_SUCCESS;
}

/**
 *  igb_write_i2c_byte - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: device address
 *  @data: value to write
 *
 *  Performs byte write operation over I2C interface at
 *  a specified device address.
 */
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		       u8 dev_addr, u8 data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
		return E1000_ERR_SWFW_SYNC;
	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status)
		return E1000_ERR_I2C;

	return E1000_SUCCESS;
}
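
/* Both I2C helpers above go through the single adapter->i2c_client set
 * up at probe time, so the @dev_addr argument is effectively unused:
 * the slave address is fixed when the client is instantiated. The
 * SW/FW semaphore (E1000_SWFW_PHY0_SM) is taken around each SMBus
 * transfer because firmware shares access to the same bus, e.g. for
 * the external thermal sensor on i350 parts.
 */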
/* igb_main.c */