ixgb_main.c revision f646968f8f7c624587de729115d802372b9063dd
1/*******************************************************************************
2
3  Intel PRO/10GbE Linux driver
4  Copyright(c) 1999 - 2008 Intel Corporation.
5
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  more details.
14
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21
22  Contact Information:
23  Linux NICS <linux.nics@intel.com>
24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/prefetch.h>
32#include "ixgb.h"
33
34char ixgb_driver_name[] = "ixgb";
35static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
36
37#define DRIVERNAPI "-NAPI"
38#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
39const char ixgb_driver_version[] = DRV_VERSION;
40static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
41
42#define IXGB_CB_LENGTH 256
43static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
44module_param(copybreak, uint, 0644);
45MODULE_PARM_DESC(copybreak,
46	"Maximum size of packet that is copied to a new buffer on receive");
47
48/* ixgb_pci_tbl - PCI Device ID Table
49 *
50 * Wildcard entries (PCI_ANY_ID) should come last
51 * Last entry must be all 0s
52 *
53 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
54 *   Class, Class Mask, private data (not used) }
55 */
56static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
57	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
58	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
59	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
60	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
61	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
62	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
63	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
64	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
65
66	/* required last entry */
67	{0,}
68};
69
70MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
71
72/* Local Function Prototypes */
73static int ixgb_init_module(void);
74static void ixgb_exit_module(void);
75static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
76static void ixgb_remove(struct pci_dev *pdev);
77static int ixgb_sw_init(struct ixgb_adapter *adapter);
78static int ixgb_open(struct net_device *netdev);
79static int ixgb_close(struct net_device *netdev);
80static void ixgb_configure_tx(struct ixgb_adapter *adapter);
81static void ixgb_configure_rx(struct ixgb_adapter *adapter);
82static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
83static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
84static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
85static void ixgb_set_multi(struct net_device *netdev);
86static void ixgb_watchdog(unsigned long data);
87static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
88				   struct net_device *netdev);
89static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
90static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
91static int ixgb_set_mac(struct net_device *netdev, void *p);
92static irqreturn_t ixgb_intr(int irq, void *data);
93static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
94
95static int ixgb_clean(struct napi_struct *, int);
96static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
97static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
98
99static void ixgb_tx_timeout(struct net_device *dev);
100static void ixgb_tx_timeout_task(struct work_struct *work);
101
102static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
103static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
104static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
105static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
106static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
107
108#ifdef CONFIG_NET_POLL_CONTROLLER
109/* for netdump / net console */
110static void ixgb_netpoll(struct net_device *dev);
111#endif
112
113static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
114                             enum pci_channel_state state);
115static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
116static void ixgb_io_resume (struct pci_dev *pdev);
117
118static const struct pci_error_handlers ixgb_err_handler = {
119	.error_detected = ixgb_io_error_detected,
120	.slot_reset = ixgb_io_slot_reset,
121	.resume = ixgb_io_resume,
122};
123
124static struct pci_driver ixgb_driver = {
125	.name     = ixgb_driver_name,
126	.id_table = ixgb_pci_tbl,
127	.probe    = ixgb_probe,
128	.remove   = ixgb_remove,
129	.err_handler = &ixgb_err_handler
130};
131
132MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
133MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
134MODULE_LICENSE("GPL");
135MODULE_VERSION(DRV_VERSION);
136
137#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
138static int debug = -1;
139module_param(debug, int, 0);
140MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
141
142/**
143 * ixgb_init_module - Driver Registration Routine
144 *
145 * ixgb_init_module is the first routine called when the driver is
146 * loaded. All it does is register with the PCI subsystem.
147 **/
148
149static int __init
150ixgb_init_module(void)
151{
152	pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
153	pr_info("%s\n", ixgb_copyright);
154
155	return pci_register_driver(&ixgb_driver);
156}
157
158module_init(ixgb_init_module);
159
160/**
161 * ixgb_exit_module - Driver Exit Cleanup Routine
162 *
163 * ixgb_exit_module is called just before the driver is removed
164 * from memory.
165 **/
166
167static void __exit
168ixgb_exit_module(void)
169{
170	pci_unregister_driver(&ixgb_driver);
171}
172
173module_exit(ixgb_exit_module);
174
175/**
176 * ixgb_irq_disable - Mask off interrupt generation on the NIC
177 * @adapter: board private structure
178 **/
179
180static void
181ixgb_irq_disable(struct ixgb_adapter *adapter)
182{
183	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
184	IXGB_WRITE_FLUSH(&adapter->hw);
185	synchronize_irq(adapter->pdev->irq);
186}
187
188/**
189 * ixgb_irq_enable - Enable default interrupt generation settings
190 * @adapter: board private structure
191 **/
192
193static void
194ixgb_irq_enable(struct ixgb_adapter *adapter)
195{
196	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
197		  IXGB_INT_TXDW | IXGB_INT_LSC;
198	if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
199		val |= IXGB_INT_GPI0;
200	IXGB_WRITE_REG(&adapter->hw, IMS, val);
201	IXGB_WRITE_FLUSH(&adapter->hw);
202}
203
204int
205ixgb_up(struct ixgb_adapter *adapter)
206{
207	struct net_device *netdev = adapter->netdev;
208	int err, irq_flags = IRQF_SHARED;
209	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
210	struct ixgb_hw *hw = &adapter->hw;
211
212	/* hardware has been reset, we need to reload some things */
213
214	ixgb_rar_set(hw, netdev->dev_addr, 0);
215	ixgb_set_multi(netdev);
216
217	ixgb_restore_vlan(adapter);
218
219	ixgb_configure_tx(adapter);
220	ixgb_setup_rctl(adapter);
221	ixgb_configure_rx(adapter);
222	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
223
224	/* disable interrupts and get the hardware into a known state */
225	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
226
227	/* only enable MSI if bus is in PCI-X mode */
228	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
229		err = pci_enable_msi(adapter->pdev);
230		if (!err) {
231			adapter->have_msi = true;
232			irq_flags = 0;
233		}
234		/* proceed to try to request regular interrupt */
235	}
236
237	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
238	                  netdev->name, netdev);
239	if (err) {
240		if (adapter->have_msi)
241			pci_disable_msi(adapter->pdev);
242		netif_err(adapter, probe, adapter->netdev,
243			  "Unable to allocate interrupt Error: %d\n", err);
244		return err;
245	}
246
247	if ((hw->max_frame_size != max_frame) ||
248		(hw->max_frame_size !=
249		(IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
250
251		hw->max_frame_size = max_frame;
252
253		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
254
255		if (hw->max_frame_size >
256		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
257			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
258
259			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
260				ctrl0 |= IXGB_CTRL0_JFE;
261				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
262			}
263		}
264	}
265
266	clear_bit(__IXGB_DOWN, &adapter->flags);
267
268	napi_enable(&adapter->napi);
269	ixgb_irq_enable(adapter);
270
271	netif_wake_queue(netdev);
272
273	mod_timer(&adapter->watchdog_timer, jiffies);
274
275	return 0;
276}
277
278void
279ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
280{
281	struct net_device *netdev = adapter->netdev;
282
283	/* prevent the interrupt handler from restarting watchdog */
284	set_bit(__IXGB_DOWN, &adapter->flags);
285
286	napi_disable(&adapter->napi);
287	/* waiting for NAPI to complete can re-enable interrupts */
288	ixgb_irq_disable(adapter);
289	free_irq(adapter->pdev->irq, netdev);
290
291	if (adapter->have_msi)
292		pci_disable_msi(adapter->pdev);
293
294	if (kill_watchdog)
295		del_timer_sync(&adapter->watchdog_timer);
296
297	adapter->link_speed = 0;
298	adapter->link_duplex = 0;
299	netif_carrier_off(netdev);
300	netif_stop_queue(netdev);
301
302	ixgb_reset(adapter);
303	ixgb_clean_tx_ring(adapter);
304	ixgb_clean_rx_ring(adapter);
305}
306
307void
308ixgb_reset(struct ixgb_adapter *adapter)
309{
310	struct ixgb_hw *hw = &adapter->hw;
311
312	ixgb_adapter_stop(hw);
313	if (!ixgb_init_hw(hw))
314		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
315
316	/* restore frame size information */
317	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
318	if (hw->max_frame_size >
319	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
320		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
321		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
322			ctrl0 |= IXGB_CTRL0_JFE;
323			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
324		}
325	}
326}
327
328static netdev_features_t
329ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
330{
331	/*
332	 * Tx VLAN insertion does not work per HW design when Rx stripping is
333	 * disabled.
334	 */
335	if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
336		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
337
338	return features;
339}
340
341static int
342ixgb_set_features(struct net_device *netdev, netdev_features_t features)
343{
344	struct ixgb_adapter *adapter = netdev_priv(netdev);
345	netdev_features_t changed = features ^ netdev->features;
346
347	if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
348		return 0;
349
350	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
351
352	if (netif_running(netdev)) {
353		ixgb_down(adapter, true);
354		ixgb_up(adapter);
355		ixgb_set_speed_duplex(netdev);
356	} else
357		ixgb_reset(adapter);
358
359	return 0;
360}
361
362
363static const struct net_device_ops ixgb_netdev_ops = {
364	.ndo_open 		= ixgb_open,
365	.ndo_stop		= ixgb_close,
366	.ndo_start_xmit		= ixgb_xmit_frame,
367	.ndo_get_stats		= ixgb_get_stats,
368	.ndo_set_rx_mode	= ixgb_set_multi,
369	.ndo_validate_addr	= eth_validate_addr,
370	.ndo_set_mac_address	= ixgb_set_mac,
371	.ndo_change_mtu		= ixgb_change_mtu,
372	.ndo_tx_timeout		= ixgb_tx_timeout,
373	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
374	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
375#ifdef CONFIG_NET_POLL_CONTROLLER
376	.ndo_poll_controller	= ixgb_netpoll,
377#endif
378	.ndo_fix_features       = ixgb_fix_features,
379	.ndo_set_features       = ixgb_set_features,
380};
381
382/**
383 * ixgb_probe - Device Initialization Routine
384 * @pdev: PCI device information struct
385 * @ent: entry in ixgb_pci_tbl
386 *
387 * Returns 0 on success, negative on failure
388 *
389 * ixgb_probe initializes an adapter identified by a pci_dev structure.
390 * The OS initialization, configuring of the adapter private structure,
391 * and a hardware reset occur.
392 **/
393
394static int
395ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
396{
397	struct net_device *netdev = NULL;
398	struct ixgb_adapter *adapter;
399	static int cards_found = 0;
400	int pci_using_dac;
401	int i;
402	int err;
403
404	err = pci_enable_device(pdev);
405	if (err)
406		return err;
407
408	pci_using_dac = 0;
409	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
410	if (!err) {
411		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
412		if (!err)
413			pci_using_dac = 1;
414	} else {
415		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
416		if (err) {
417			err = dma_set_coherent_mask(&pdev->dev,
418						    DMA_BIT_MASK(32));
419			if (err) {
420				pr_err("No usable DMA configuration, aborting\n");
421				goto err_dma_mask;
422			}
423		}
424	}
425
426	err = pci_request_regions(pdev, ixgb_driver_name);
427	if (err)
428		goto err_request_regions;
429
430	pci_set_master(pdev);
431
432	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
433	if (!netdev) {
434		err = -ENOMEM;
435		goto err_alloc_etherdev;
436	}
437
438	SET_NETDEV_DEV(netdev, &pdev->dev);
439
440	pci_set_drvdata(pdev, netdev);
441	adapter = netdev_priv(netdev);
442	adapter->netdev = netdev;
443	adapter->pdev = pdev;
444	adapter->hw.back = adapter;
445	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
446
447	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
448	if (!adapter->hw.hw_addr) {
449		err = -EIO;
450		goto err_ioremap;
451	}
452
453	for (i = BAR_1; i <= BAR_5; i++) {
454		if (pci_resource_len(pdev, i) == 0)
455			continue;
456		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
457			adapter->hw.io_base = pci_resource_start(pdev, i);
458			break;
459		}
460	}
461
462	netdev->netdev_ops = &ixgb_netdev_ops;
463	ixgb_set_ethtool_ops(netdev);
464	netdev->watchdog_timeo = 5 * HZ;
465	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
466
467	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
468
469	adapter->bd_number = cards_found;
470	adapter->link_speed = 0;
471	adapter->link_duplex = 0;
472
473	/* setup the private structure */
474
475	err = ixgb_sw_init(adapter);
476	if (err)
477		goto err_sw_init;
478
479	netdev->hw_features = NETIF_F_SG |
480			   NETIF_F_TSO |
481			   NETIF_F_HW_CSUM |
482			   NETIF_F_HW_VLAN_CTAG_TX |
483			   NETIF_F_HW_VLAN_CTAG_RX;
484	netdev->features = netdev->hw_features |
485			   NETIF_F_HW_VLAN_CTAG_FILTER;
486	netdev->hw_features |= NETIF_F_RXCSUM;
487
488	if (pci_using_dac) {
489		netdev->features |= NETIF_F_HIGHDMA;
490		netdev->vlan_features |= NETIF_F_HIGHDMA;
491	}
492
493	/* make sure the EEPROM is good */
494
495	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
496		netif_err(adapter, probe, adapter->netdev,
497			  "The EEPROM Checksum Is Not Valid\n");
498		err = -EIO;
499		goto err_eeprom;
500	}
501
502	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
503
504	if (!is_valid_ether_addr(netdev->dev_addr)) {
505		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
506		err = -EIO;
507		goto err_eeprom;
508	}
509
510	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
511
512	init_timer(&adapter->watchdog_timer);
513	adapter->watchdog_timer.function = ixgb_watchdog;
514	adapter->watchdog_timer.data = (unsigned long)adapter;
515
516	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
517
518	strcpy(netdev->name, "eth%d");
519	err = register_netdev(netdev);
520	if (err)
521		goto err_register;
522
523	/* carrier off reporting is important to ethtool even BEFORE open */
524	netif_carrier_off(netdev);
525
526	netif_info(adapter, probe, adapter->netdev,
527		   "Intel(R) PRO/10GbE Network Connection\n");
528	ixgb_check_options(adapter);
529	/* reset the hardware with the new settings */
530
531	ixgb_reset(adapter);
532
533	cards_found++;
534	return 0;
535
536err_register:
537err_sw_init:
538err_eeprom:
539	iounmap(adapter->hw.hw_addr);
540err_ioremap:
541	free_netdev(netdev);
542err_alloc_etherdev:
543	pci_release_regions(pdev);
544err_request_regions:
545err_dma_mask:
546	pci_disable_device(pdev);
547	return err;
548}
549
550/**
551 * ixgb_remove - Device Removal Routine
552 * @pdev: PCI device information struct
553 *
554 * ixgb_remove is called by the PCI subsystem to alert the driver
555 * that it should release a PCI device.  The could be caused by a
556 * Hot-Plug event, or because the driver is going to be removed from
557 * memory.
558 **/
559
560static void
561ixgb_remove(struct pci_dev *pdev)
562{
563	struct net_device *netdev = pci_get_drvdata(pdev);
564	struct ixgb_adapter *adapter = netdev_priv(netdev);
565
566	cancel_work_sync(&adapter->tx_timeout_task);
567
568	unregister_netdev(netdev);
569
570	iounmap(adapter->hw.hw_addr);
571	pci_release_regions(pdev);
572
573	free_netdev(netdev);
574	pci_disable_device(pdev);
575}
576
577/**
578 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
579 * @adapter: board private structure to initialize
580 *
581 * ixgb_sw_init initializes the Adapter private data structure.
582 * Fields are initialized based on PCI device information and
583 * OS network device settings (MTU size).
584 **/
585
586static int
587ixgb_sw_init(struct ixgb_adapter *adapter)
588{
589	struct ixgb_hw *hw = &adapter->hw;
590	struct net_device *netdev = adapter->netdev;
591	struct pci_dev *pdev = adapter->pdev;
592
593	/* PCI config space info */
594
595	hw->vendor_id = pdev->vendor;
596	hw->device_id = pdev->device;
597	hw->subsystem_vendor_id = pdev->subsystem_vendor;
598	hw->subsystem_id = pdev->subsystem_device;
599
600	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
601	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
602
603	if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
604	    (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
605	    (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
606	    (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
607		hw->mac_type = ixgb_82597;
608	else {
609		/* should never have loaded on this device */
610		netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
611	}
612
613	/* enable flow control to be programmed */
614	hw->fc.send_xon = 1;
615
616	set_bit(__IXGB_DOWN, &adapter->flags);
617	return 0;
618}
619
620/**
621 * ixgb_open - Called when a network interface is made active
622 * @netdev: network interface device structure
623 *
624 * Returns 0 on success, negative value on failure
625 *
626 * The open entry point is called when a network interface is made
627 * active by the system (IFF_UP).  At this point all resources needed
628 * for transmit and receive operations are allocated, the interrupt
629 * handler is registered with the OS, the watchdog timer is started,
630 * and the stack is notified that the interface is ready.
631 **/
632
633static int
634ixgb_open(struct net_device *netdev)
635{
636	struct ixgb_adapter *adapter = netdev_priv(netdev);
637	int err;
638
639	/* allocate transmit descriptors */
640	err = ixgb_setup_tx_resources(adapter);
641	if (err)
642		goto err_setup_tx;
643
644	netif_carrier_off(netdev);
645
646	/* allocate receive descriptors */
647
648	err = ixgb_setup_rx_resources(adapter);
649	if (err)
650		goto err_setup_rx;
651
652	err = ixgb_up(adapter);
653	if (err)
654		goto err_up;
655
656	netif_start_queue(netdev);
657
658	return 0;
659
660err_up:
661	ixgb_free_rx_resources(adapter);
662err_setup_rx:
663	ixgb_free_tx_resources(adapter);
664err_setup_tx:
665	ixgb_reset(adapter);
666
667	return err;
668}
669
670/**
671 * ixgb_close - Disables a network interface
672 * @netdev: network interface device structure
673 *
674 * Returns 0, this is not allowed to fail
675 *
676 * The close entry point is called when an interface is de-activated
677 * by the OS.  The hardware is still under the drivers control, but
678 * needs to be disabled.  A global MAC reset is issued to stop the
679 * hardware, and all transmit and receive resources are freed.
680 **/
681
682static int
683ixgb_close(struct net_device *netdev)
684{
685	struct ixgb_adapter *adapter = netdev_priv(netdev);
686
687	ixgb_down(adapter, true);
688
689	ixgb_free_tx_resources(adapter);
690	ixgb_free_rx_resources(adapter);
691
692	return 0;
693}
694
695/**
696 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
697 * @adapter: board private structure
698 *
699 * Return 0 on success, negative on failure
700 **/
701
702int
703ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
704{
705	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
706	struct pci_dev *pdev = adapter->pdev;
707	int size;
708
709	size = sizeof(struct ixgb_buffer) * txdr->count;
710	txdr->buffer_info = vzalloc(size);
711	if (!txdr->buffer_info)
712		return -ENOMEM;
713
714	/* round up to nearest 4K */
715
716	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
717	txdr->size = ALIGN(txdr->size, 4096);
718
719	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
720					GFP_KERNEL | __GFP_ZERO);
721	if (!txdr->desc) {
722		vfree(txdr->buffer_info);
723		return -ENOMEM;
724	}
725
726	txdr->next_to_use = 0;
727	txdr->next_to_clean = 0;
728
729	return 0;
730}
731
732/**
733 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
734 * @adapter: board private structure
735 *
736 * Configure the Tx unit of the MAC after a reset.
737 **/
738
739static void
740ixgb_configure_tx(struct ixgb_adapter *adapter)
741{
742	u64 tdba = adapter->tx_ring.dma;
743	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
744	u32 tctl;
745	struct ixgb_hw *hw = &adapter->hw;
746
747	/* Setup the Base and Length of the Tx Descriptor Ring
748	 * tx_ring.dma can be either a 32 or 64 bit value
749	 */
750
751	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
752	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
753
754	IXGB_WRITE_REG(hw, TDLEN, tdlen);
755
756	/* Setup the HW Tx Head and Tail descriptor pointers */
757
758	IXGB_WRITE_REG(hw, TDH, 0);
759	IXGB_WRITE_REG(hw, TDT, 0);
760
761	/* don't set up txdctl, it induces performance problems if configured
762	 * incorrectly */
763	/* Set the Tx Interrupt Delay register */
764
765	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
766
767	/* Program the Transmit Control Register */
768
769	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
770	IXGB_WRITE_REG(hw, TCTL, tctl);
771
772	/* Setup Transmit Descriptor Settings for this adapter */
773	adapter->tx_cmd_type =
774		IXGB_TX_DESC_TYPE |
775		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
776}
777
778/**
779 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
780 * @adapter: board private structure
781 *
782 * Returns 0 on success, negative on failure
783 **/
784
785int
786ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
787{
788	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
789	struct pci_dev *pdev = adapter->pdev;
790	int size;
791
792	size = sizeof(struct ixgb_buffer) * rxdr->count;
793	rxdr->buffer_info = vzalloc(size);
794	if (!rxdr->buffer_info)
795		return -ENOMEM;
796
797	/* Round up to nearest 4K */
798
799	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
800	rxdr->size = ALIGN(rxdr->size, 4096);
801
802	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
803					GFP_KERNEL);
804
805	if (!rxdr->desc) {
806		vfree(rxdr->buffer_info);
807		return -ENOMEM;
808	}
809	memset(rxdr->desc, 0, rxdr->size);
810
811	rxdr->next_to_clean = 0;
812	rxdr->next_to_use = 0;
813
814	return 0;
815}
816
817/**
818 * ixgb_setup_rctl - configure the receive control register
819 * @adapter: Board private structure
820 **/
821
822static void
823ixgb_setup_rctl(struct ixgb_adapter *adapter)
824{
825	u32 rctl;
826
827	rctl = IXGB_READ_REG(&adapter->hw, RCTL);
828
829	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
830
831	rctl |=
832		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
833		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
834		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
835
836	rctl |= IXGB_RCTL_SECRC;
837
838	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
839		rctl |= IXGB_RCTL_BSIZE_2048;
840	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
841		rctl |= IXGB_RCTL_BSIZE_4096;
842	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
843		rctl |= IXGB_RCTL_BSIZE_8192;
844	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
845		rctl |= IXGB_RCTL_BSIZE_16384;
846
847	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
848}
849
850/**
851 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
852 * @adapter: board private structure
853 *
854 * Configure the Rx unit of the MAC after a reset.
855 **/
856
857static void
858ixgb_configure_rx(struct ixgb_adapter *adapter)
859{
860	u64 rdba = adapter->rx_ring.dma;
861	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
862	struct ixgb_hw *hw = &adapter->hw;
863	u32 rctl;
864	u32 rxcsum;
865
866	/* make sure receives are disabled while setting up the descriptors */
867
868	rctl = IXGB_READ_REG(hw, RCTL);
869	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
870
871	/* set the Receive Delay Timer Register */
872
873	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
874
875	/* Setup the Base and Length of the Rx Descriptor Ring */
876
877	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
878	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
879
880	IXGB_WRITE_REG(hw, RDLEN, rdlen);
881
882	/* Setup the HW Rx Head and Tail Descriptor Pointers */
883	IXGB_WRITE_REG(hw, RDH, 0);
884	IXGB_WRITE_REG(hw, RDT, 0);
885
886	/* due to the hardware errata with RXDCTL, we are unable to use any of
887	 * the performance enhancing features of it without causing other
888	 * subtle bugs, some of the bugs could include receive length
889	 * corruption at high data rates (WTHRESH > 0) and/or receive
890	 * descriptor ring irregularites (particularly in hardware cache) */
891	IXGB_WRITE_REG(hw, RXDCTL, 0);
892
893	/* Enable Receive Checksum Offload for TCP and UDP */
894	if (adapter->rx_csum) {
895		rxcsum = IXGB_READ_REG(hw, RXCSUM);
896		rxcsum |= IXGB_RXCSUM_TUOFL;
897		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
898	}
899
900	/* Enable Receives */
901
902	IXGB_WRITE_REG(hw, RCTL, rctl);
903}
904
905/**
906 * ixgb_free_tx_resources - Free Tx Resources
907 * @adapter: board private structure
908 *
909 * Free all transmit software resources
910 **/
911
912void
913ixgb_free_tx_resources(struct ixgb_adapter *adapter)
914{
915	struct pci_dev *pdev = adapter->pdev;
916
917	ixgb_clean_tx_ring(adapter);
918
919	vfree(adapter->tx_ring.buffer_info);
920	adapter->tx_ring.buffer_info = NULL;
921
922	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
923			  adapter->tx_ring.desc, adapter->tx_ring.dma);
924
925	adapter->tx_ring.desc = NULL;
926}
927
928static void
929ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
930                                struct ixgb_buffer *buffer_info)
931{
932	if (buffer_info->dma) {
933		if (buffer_info->mapped_as_page)
934			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
935				       buffer_info->length, DMA_TO_DEVICE);
936		else
937			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
938					 buffer_info->length, DMA_TO_DEVICE);
939		buffer_info->dma = 0;
940	}
941
942	if (buffer_info->skb) {
943		dev_kfree_skb_any(buffer_info->skb);
944		buffer_info->skb = NULL;
945	}
946	buffer_info->time_stamp = 0;
947	/* these fields must always be initialized in tx
948	 * buffer_info->length = 0;
949	 * buffer_info->next_to_watch = 0; */
950}
951
952/**
953 * ixgb_clean_tx_ring - Free Tx Buffers
954 * @adapter: board private structure
955 **/
956
957static void
958ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
959{
960	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
961	struct ixgb_buffer *buffer_info;
962	unsigned long size;
963	unsigned int i;
964
965	/* Free all the Tx ring sk_buffs */
966
967	for (i = 0; i < tx_ring->count; i++) {
968		buffer_info = &tx_ring->buffer_info[i];
969		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
970	}
971
972	size = sizeof(struct ixgb_buffer) * tx_ring->count;
973	memset(tx_ring->buffer_info, 0, size);
974
975	/* Zero out the descriptor ring */
976
977	memset(tx_ring->desc, 0, tx_ring->size);
978
979	tx_ring->next_to_use = 0;
980	tx_ring->next_to_clean = 0;
981
982	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
983	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
984}
985
986/**
987 * ixgb_free_rx_resources - Free Rx Resources
988 * @adapter: board private structure
989 *
990 * Free all receive software resources
991 **/
992
993void
994ixgb_free_rx_resources(struct ixgb_adapter *adapter)
995{
996	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
997	struct pci_dev *pdev = adapter->pdev;
998
999	ixgb_clean_rx_ring(adapter);
1000
1001	vfree(rx_ring->buffer_info);
1002	rx_ring->buffer_info = NULL;
1003
1004	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1005			  rx_ring->dma);
1006
1007	rx_ring->desc = NULL;
1008}
1009
1010/**
1011 * ixgb_clean_rx_ring - Free Rx Buffers
1012 * @adapter: board private structure
1013 **/
1014
1015static void
1016ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1017{
1018	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1019	struct ixgb_buffer *buffer_info;
1020	struct pci_dev *pdev = adapter->pdev;
1021	unsigned long size;
1022	unsigned int i;
1023
1024	/* Free all the Rx ring sk_buffs */
1025
1026	for (i = 0; i < rx_ring->count; i++) {
1027		buffer_info = &rx_ring->buffer_info[i];
1028		if (buffer_info->dma) {
1029			dma_unmap_single(&pdev->dev,
1030					 buffer_info->dma,
1031					 buffer_info->length,
1032					 DMA_FROM_DEVICE);
1033			buffer_info->dma = 0;
1034			buffer_info->length = 0;
1035		}
1036
1037		if (buffer_info->skb) {
1038			dev_kfree_skb(buffer_info->skb);
1039			buffer_info->skb = NULL;
1040		}
1041	}
1042
1043	size = sizeof(struct ixgb_buffer) * rx_ring->count;
1044	memset(rx_ring->buffer_info, 0, size);
1045
1046	/* Zero out the descriptor ring */
1047
1048	memset(rx_ring->desc, 0, rx_ring->size);
1049
1050	rx_ring->next_to_clean = 0;
1051	rx_ring->next_to_use = 0;
1052
1053	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1054	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1055}
1056
1057/**
1058 * ixgb_set_mac - Change the Ethernet Address of the NIC
1059 * @netdev: network interface device structure
1060 * @p: pointer to an address structure
1061 *
1062 * Returns 0 on success, negative on failure
1063 **/
1064
1065static int
1066ixgb_set_mac(struct net_device *netdev, void *p)
1067{
1068	struct ixgb_adapter *adapter = netdev_priv(netdev);
1069	struct sockaddr *addr = p;
1070
1071	if (!is_valid_ether_addr(addr->sa_data))
1072		return -EADDRNOTAVAIL;
1073
1074	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1075
1076	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1077
1078	return 0;
1079}
1080
1081/**
1082 * ixgb_set_multi - Multicast and Promiscuous mode set
1083 * @netdev: network interface device structure
1084 *
1085 * The set_multi entry point is called whenever the multicast address
1086 * list or the network interface flags are updated.  This routine is
1087 * responsible for configuring the hardware for proper multicast,
1088 * promiscuous mode, and all-multi behavior.
1089 **/
1090
1091static void
1092ixgb_set_multi(struct net_device *netdev)
1093{
1094	struct ixgb_adapter *adapter = netdev_priv(netdev);
1095	struct ixgb_hw *hw = &adapter->hw;
1096	struct netdev_hw_addr *ha;
1097	u32 rctl;
1098
1099	/* Check for Promiscuous and All Multicast modes */
1100
1101	rctl = IXGB_READ_REG(hw, RCTL);
1102
1103	if (netdev->flags & IFF_PROMISC) {
1104		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1105		/* disable VLAN filtering */
1106		rctl &= ~IXGB_RCTL_CFIEN;
1107		rctl &= ~IXGB_RCTL_VFE;
1108	} else {
1109		if (netdev->flags & IFF_ALLMULTI) {
1110			rctl |= IXGB_RCTL_MPE;
1111			rctl &= ~IXGB_RCTL_UPE;
1112		} else {
1113			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1114		}
1115		/* enable VLAN filtering */
1116		rctl |= IXGB_RCTL_VFE;
1117		rctl &= ~IXGB_RCTL_CFIEN;
1118	}
1119
1120	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1121		rctl |= IXGB_RCTL_MPE;
1122		IXGB_WRITE_REG(hw, RCTL, rctl);
1123	} else {
1124		u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
1125			      ETH_ALEN, GFP_ATOMIC);
1126		u8 *addr;
1127		if (!mta)
1128			goto alloc_failed;
1129
1130		IXGB_WRITE_REG(hw, RCTL, rctl);
1131
1132		addr = mta;
1133		netdev_for_each_mc_addr(ha, netdev) {
1134			memcpy(addr, ha->addr, ETH_ALEN);
1135			addr += ETH_ALEN;
1136		}
1137
1138		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1139		kfree(mta);
1140	}
1141
1142alloc_failed:
1143	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1144		ixgb_vlan_strip_enable(adapter);
1145	else
1146		ixgb_vlan_strip_disable(adapter);
1147
1148}
1149
1150/**
1151 * ixgb_watchdog - Timer Call-back
1152 * @data: pointer to netdev cast into an unsigned long
1153 **/
1154
1155static void
1156ixgb_watchdog(unsigned long data)
1157{
1158	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
1159	struct net_device *netdev = adapter->netdev;
1160	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1161
1162	ixgb_check_for_link(&adapter->hw);
1163
1164	if (ixgb_check_for_bad_link(&adapter->hw)) {
1165		/* force the reset path */
1166		netif_stop_queue(netdev);
1167	}
1168
1169	if (adapter->hw.link_up) {
1170		if (!netif_carrier_ok(netdev)) {
1171			netdev_info(netdev,
1172				    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
1173				    (adapter->hw.fc.type == ixgb_fc_full) ?
1174				    "RX/TX" :
1175				    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
1176				     "RX" :
1177				    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
1178				    "TX" : "None");
1179			adapter->link_speed = 10000;
1180			adapter->link_duplex = FULL_DUPLEX;
1181			netif_carrier_on(netdev);
1182		}
1183	} else {
1184		if (netif_carrier_ok(netdev)) {
1185			adapter->link_speed = 0;
1186			adapter->link_duplex = 0;
1187			netdev_info(netdev, "NIC Link is Down\n");
1188			netif_carrier_off(netdev);
1189		}
1190	}
1191
1192	ixgb_update_stats(adapter);
1193
1194	if (!netif_carrier_ok(netdev)) {
1195		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1196			/* We've lost link, so the controller stops DMA,
1197			 * but we've got queued Tx work that's never going
1198			 * to get done, so reset controller to flush Tx.
1199			 * (Do the reset outside of interrupt context). */
1200			schedule_work(&adapter->tx_timeout_task);
1201			/* return immediately since reset is imminent */
1202			return;
1203		}
1204	}
1205
1206	/* Force detection of hung controller every watchdog period */
1207	adapter->detect_tx_hung = true;
1208
1209	/* generate an interrupt to force clean up of any stragglers */
1210	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1211
1212	/* Reset the timer */
1213	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1214}
1215
1216#define IXGB_TX_FLAGS_CSUM		0x00000001
1217#define IXGB_TX_FLAGS_VLAN		0x00000002
1218#define IXGB_TX_FLAGS_TSO		0x00000004
1219
1220static int
1221ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1222{
1223	struct ixgb_context_desc *context_desc;
1224	unsigned int i;
1225	u8 ipcss, ipcso, tucss, tucso, hdr_len;
1226	u16 ipcse, tucse, mss;
1227	int err;
1228
1229	if (likely(skb_is_gso(skb))) {
1230		struct ixgb_buffer *buffer_info;
1231		struct iphdr *iph;
1232
1233		if (skb_header_cloned(skb)) {
1234			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1235			if (err)
1236				return err;
1237		}
1238
1239		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1240		mss = skb_shinfo(skb)->gso_size;
1241		iph = ip_hdr(skb);
1242		iph->tot_len = 0;
1243		iph->check = 0;
1244		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1245							 iph->daddr, 0,
1246							 IPPROTO_TCP, 0);
1247		ipcss = skb_network_offset(skb);
1248		ipcso = (void *)&(iph->check) - (void *)skb->data;
1249		ipcse = skb_transport_offset(skb) - 1;
1250		tucss = skb_transport_offset(skb);
1251		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1252		tucse = 0;
1253
1254		i = adapter->tx_ring.next_to_use;
1255		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1256		buffer_info = &adapter->tx_ring.buffer_info[i];
1257		WARN_ON(buffer_info->dma != 0);
1258
1259		context_desc->ipcss = ipcss;
1260		context_desc->ipcso = ipcso;
1261		context_desc->ipcse = cpu_to_le16(ipcse);
1262		context_desc->tucss = tucss;
1263		context_desc->tucso = tucso;
1264		context_desc->tucse = cpu_to_le16(tucse);
1265		context_desc->mss = cpu_to_le16(mss);
1266		context_desc->hdr_len = hdr_len;
1267		context_desc->status = 0;
1268		context_desc->cmd_type_len = cpu_to_le32(
1269						  IXGB_CONTEXT_DESC_TYPE
1270						| IXGB_CONTEXT_DESC_CMD_TSE
1271						| IXGB_CONTEXT_DESC_CMD_IP
1272						| IXGB_CONTEXT_DESC_CMD_TCP
1273						| IXGB_CONTEXT_DESC_CMD_IDE
1274						| (skb->len - (hdr_len)));
1275
1276
1277		if (++i == adapter->tx_ring.count) i = 0;
1278		adapter->tx_ring.next_to_use = i;
1279
1280		return 1;
1281	}
1282
1283	return 0;
1284}
1285
1286static bool
1287ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1288{
1289	struct ixgb_context_desc *context_desc;
1290	unsigned int i;
1291	u8 css, cso;
1292
1293	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1294		struct ixgb_buffer *buffer_info;
1295		css = skb_checksum_start_offset(skb);
1296		cso = css + skb->csum_offset;
1297
1298		i = adapter->tx_ring.next_to_use;
1299		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1300		buffer_info = &adapter->tx_ring.buffer_info[i];
1301		WARN_ON(buffer_info->dma != 0);
1302
1303		context_desc->tucss = css;
1304		context_desc->tucso = cso;
1305		context_desc->tucse = 0;
1306		/* zero out any previously existing data in one instruction */
1307		*(u32 *)&(context_desc->ipcss) = 0;
1308		context_desc->status = 0;
1309		context_desc->hdr_len = 0;
1310		context_desc->mss = 0;
1311		context_desc->cmd_type_len =
1312			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1313				    | IXGB_TX_DESC_CMD_IDE);
1314
1315		if (++i == adapter->tx_ring.count) i = 0;
1316		adapter->tx_ring.next_to_use = i;
1317
1318		return true;
1319	}
1320
1321	return false;
1322}
1323
1324#define IXGB_MAX_TXD_PWR	14
1325#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)
1326
1327static int
1328ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1329	    unsigned int first)
1330{
1331	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1332	struct pci_dev *pdev = adapter->pdev;
1333	struct ixgb_buffer *buffer_info;
1334	int len = skb_headlen(skb);
1335	unsigned int offset = 0, size, count = 0, i;
1336	unsigned int mss = skb_shinfo(skb)->gso_size;
1337	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1338	unsigned int f;
1339
1340	i = tx_ring->next_to_use;
1341
1342	while (len) {
1343		buffer_info = &tx_ring->buffer_info[i];
1344		size = min(len, IXGB_MAX_DATA_PER_TXD);
1345		/* Workaround for premature desc write-backs
1346		 * in TSO mode.  Append 4-byte sentinel desc */
1347		if (unlikely(mss && !nr_frags && size == len && size > 8))
1348			size -= 4;
1349
1350		buffer_info->length = size;
1351		WARN_ON(buffer_info->dma != 0);
1352		buffer_info->time_stamp = jiffies;
1353		buffer_info->mapped_as_page = false;
1354		buffer_info->dma = dma_map_single(&pdev->dev,
1355						  skb->data + offset,
1356						  size, DMA_TO_DEVICE);
1357		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1358			goto dma_error;
1359		buffer_info->next_to_watch = 0;
1360
1361		len -= size;
1362		offset += size;
1363		count++;
1364		if (len) {
1365			i++;
1366			if (i == tx_ring->count)
1367				i = 0;
1368		}
1369	}
1370
1371	for (f = 0; f < nr_frags; f++) {
1372		const struct skb_frag_struct *frag;
1373
1374		frag = &skb_shinfo(skb)->frags[f];
1375		len = skb_frag_size(frag);
1376		offset = 0;
1377
1378		while (len) {
1379			i++;
1380			if (i == tx_ring->count)
1381				i = 0;
1382
1383			buffer_info = &tx_ring->buffer_info[i];
1384			size = min(len, IXGB_MAX_DATA_PER_TXD);
1385
1386			/* Workaround for premature desc write-backs
1387			 * in TSO mode.  Append 4-byte sentinel desc */
1388			if (unlikely(mss && (f == (nr_frags - 1))
1389				     && size == len && size > 8))
1390				size -= 4;
1391
1392			buffer_info->length = size;
1393			buffer_info->time_stamp = jiffies;
1394			buffer_info->mapped_as_page = true;
1395			buffer_info->dma =
1396				skb_frag_dma_map(&pdev->dev, frag, offset, size,
1397						 DMA_TO_DEVICE);
1398			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1399				goto dma_error;
1400			buffer_info->next_to_watch = 0;
1401
1402			len -= size;
1403			offset += size;
1404			count++;
1405		}
1406	}
1407	tx_ring->buffer_info[i].skb = skb;
1408	tx_ring->buffer_info[first].next_to_watch = i;
1409
1410	return count;
1411
1412dma_error:
1413	dev_err(&pdev->dev, "TX DMA map failed\n");
1414	buffer_info->dma = 0;
1415	if (count)
1416		count--;
1417
1418	while (count--) {
1419		if (i==0)
1420			i += tx_ring->count;
1421		i--;
1422		buffer_info = &tx_ring->buffer_info[i];
1423		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1424	}
1425
1426	return 0;
1427}
1428
1429static void
1430ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1431{
1432	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1433	struct ixgb_tx_desc *tx_desc = NULL;
1434	struct ixgb_buffer *buffer_info;
1435	u32 cmd_type_len = adapter->tx_cmd_type;
1436	u8 status = 0;
1437	u8 popts = 0;
1438	unsigned int i;
1439
1440	if (tx_flags & IXGB_TX_FLAGS_TSO) {
1441		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1442		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1443	}
1444
1445	if (tx_flags & IXGB_TX_FLAGS_CSUM)
1446		popts |= IXGB_TX_DESC_POPTS_TXSM;
1447
1448	if (tx_flags & IXGB_TX_FLAGS_VLAN)
1449		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1450
1451	i = tx_ring->next_to_use;
1452
1453	while (count--) {
1454		buffer_info = &tx_ring->buffer_info[i];
1455		tx_desc = IXGB_TX_DESC(*tx_ring, i);
1456		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1457		tx_desc->cmd_type_len =
1458			cpu_to_le32(cmd_type_len | buffer_info->length);
1459		tx_desc->status = status;
1460		tx_desc->popts = popts;
1461		tx_desc->vlan = cpu_to_le16(vlan_id);
1462
1463		if (++i == tx_ring->count) i = 0;
1464	}
1465
1466	tx_desc->cmd_type_len |=
1467		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
1468
1469	/* Force memory writes to complete before letting h/w
1470	 * know there are new descriptors to fetch.  (Only
1471	 * applicable for weak-ordered memory model archs,
1472	 * such as IA-64). */
1473	wmb();
1474
1475	tx_ring->next_to_use = i;
1476	IXGB_WRITE_REG(&adapter->hw, TDT, i);
1477}
1478
1479static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
1480{
1481	struct ixgb_adapter *adapter = netdev_priv(netdev);
1482	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1483
1484	netif_stop_queue(netdev);
1485	/* Herbert's original patch had:
1486	 *  smp_mb__after_netif_stop_queue();
1487	 * but since that doesn't exist yet, just open code it. */
1488	smp_mb();
1489
1490	/* We need to check again in a case another CPU has just
1491	 * made room available. */
1492	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
1493		return -EBUSY;
1494
1495	/* A reprieve! */
1496	netif_start_queue(netdev);
1497	++adapter->restart_queue;
1498	return 0;
1499}
1500
1501static int ixgb_maybe_stop_tx(struct net_device *netdev,
1502                              struct ixgb_desc_ring *tx_ring, int size)
1503{
1504	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
1505		return 0;
1506	return __ixgb_maybe_stop_tx(netdev, size);
1507}
1508
1509
1510/* Tx Descriptors needed, worst case */
1511#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1512			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1513#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \
1514	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
1515	+ 1 /* one more needed for sentinel TSO workaround */
1516
1517static netdev_tx_t
1518ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1519{
1520	struct ixgb_adapter *adapter = netdev_priv(netdev);
1521	unsigned int first;
1522	unsigned int tx_flags = 0;
1523	int vlan_id = 0;
1524	int count = 0;
1525	int tso;
1526
1527	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1528		dev_kfree_skb(skb);
1529		return NETDEV_TX_OK;
1530	}
1531
1532	if (skb->len <= 0) {
1533		dev_kfree_skb(skb);
1534		return NETDEV_TX_OK;
1535	}
1536
1537	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1538                     DESC_NEEDED)))
1539		return NETDEV_TX_BUSY;
1540
1541	if (vlan_tx_tag_present(skb)) {
1542		tx_flags |= IXGB_TX_FLAGS_VLAN;
1543		vlan_id = vlan_tx_tag_get(skb);
1544	}
1545
1546	first = adapter->tx_ring.next_to_use;
1547
1548	tso = ixgb_tso(adapter, skb);
1549	if (tso < 0) {
1550		dev_kfree_skb(skb);
1551		return NETDEV_TX_OK;
1552	}
1553
1554	if (likely(tso))
1555		tx_flags |= IXGB_TX_FLAGS_TSO;
1556	else if (ixgb_tx_csum(adapter, skb))
1557		tx_flags |= IXGB_TX_FLAGS_CSUM;
1558
1559	count = ixgb_tx_map(adapter, skb, first);
1560
1561	if (count) {
1562		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1563		/* Make sure there is space in the ring for the next send. */
1564		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1565
1566	} else {
1567		dev_kfree_skb_any(skb);
1568		adapter->tx_ring.buffer_info[first].time_stamp = 0;
1569		adapter->tx_ring.next_to_use = first;
1570	}
1571
1572	return NETDEV_TX_OK;
1573}
1574
1575/**
1576 * ixgb_tx_timeout - Respond to a Tx Hang
1577 * @netdev: network interface device structure
1578 **/
1579
1580static void
1581ixgb_tx_timeout(struct net_device *netdev)
1582{
1583	struct ixgb_adapter *adapter = netdev_priv(netdev);
1584
1585	/* Do the reset outside of interrupt context */
1586	schedule_work(&adapter->tx_timeout_task);
1587}
1588
1589static void
1590ixgb_tx_timeout_task(struct work_struct *work)
1591{
1592	struct ixgb_adapter *adapter =
1593		container_of(work, struct ixgb_adapter, tx_timeout_task);
1594
1595	adapter->tx_timeout_count++;
1596	ixgb_down(adapter, true);
1597	ixgb_up(adapter);
1598}
1599
1600/**
1601 * ixgb_get_stats - Get System Network Statistics
1602 * @netdev: network interface device structure
1603 *
1604 * Returns the address of the device statistics structure.
1605 * The statistics are actually updated from the timer callback.
1606 **/
1607
1608static struct net_device_stats *
1609ixgb_get_stats(struct net_device *netdev)
1610{
1611	return &netdev->stats;
1612}
1613
1614/**
1615 * ixgb_change_mtu - Change the Maximum Transfer Unit
1616 * @netdev: network interface device structure
1617 * @new_mtu: new value for maximum frame size
1618 *
1619 * Returns 0 on success, negative on failure
1620 **/
1621
1622static int
1623ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1624{
1625	struct ixgb_adapter *adapter = netdev_priv(netdev);
1626	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1627	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1628
1629	/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
1630	if ((new_mtu < 68) ||
1631	    (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1632		netif_err(adapter, probe, adapter->netdev,
1633			  "Invalid MTU setting %d\n", new_mtu);
1634		return -EINVAL;
1635	}
1636
1637	if (old_max_frame == max_frame)
1638		return 0;
1639
1640	if (netif_running(netdev))
1641		ixgb_down(adapter, true);
1642
1643	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1644
1645	netdev->mtu = new_mtu;
1646
1647	if (netif_running(netdev))
1648		ixgb_up(adapter);
1649
1650	return 0;
1651}
1652
1653/**
1654 * ixgb_update_stats - Update the board statistics counters.
1655 * @adapter: board private structure
1656 **/
1657
1658void
1659ixgb_update_stats(struct ixgb_adapter *adapter)
1660{
1661	struct net_device *netdev = adapter->netdev;
1662	struct pci_dev *pdev = adapter->pdev;
1663
1664	/* Prevent stats update while adapter is being reset */
1665	if (pci_channel_offline(pdev))
1666		return;
1667
1668	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1669	   (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1670		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1671		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1672		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1673		u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1674
1675		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1676		/* fix up multicast stats by removing broadcasts */
1677		if (multi >= bcast)
1678			multi -= bcast;
1679
1680		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1681		adapter->stats.mprch += (multi >> 32);
1682		adapter->stats.bprcl += bcast_l;
1683		adapter->stats.bprch += bcast_h;
1684	} else {
1685		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1686		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1687		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1688		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1689	}
1690	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1691	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1692	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1693	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1694	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1695	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1696	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1697	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1698	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1699	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1700	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1701	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1702	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1703	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1704	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1705	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1706	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1707	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1708	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1709	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1710	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1711	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1712	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1713	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1714	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1715	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1716	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1717	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1718	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1719	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1720	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1721	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1722	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1723	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1724	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1725	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1726	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1727	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1728	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1729	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1730	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1731	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1732	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1733	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1734	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1735	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1736	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1737	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1738	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1739	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1740	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1741	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1742	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1743	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1744	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1745	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1746
1747	/* Fill out the OS statistics structure */
1748
1749	netdev->stats.rx_packets = adapter->stats.gprcl;
1750	netdev->stats.tx_packets = adapter->stats.gptcl;
1751	netdev->stats.rx_bytes = adapter->stats.gorcl;
1752	netdev->stats.tx_bytes = adapter->stats.gotcl;
1753	netdev->stats.multicast = adapter->stats.mprcl;
1754	netdev->stats.collisions = 0;
1755
1756	/* ignore RLEC as it reports errors for padded (<64bytes) frames
1757	 * with a length in the type/len field */
1758	netdev->stats.rx_errors =
1759	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1760	    adapter->stats.ruc +
1761	    adapter->stats.roc /*+ adapter->stats.rlec */  +
1762	    adapter->stats.icbc +
1763	    adapter->stats.ecbc + adapter->stats.mpc;
1764
1765	/* see above
1766	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
1767	 */
1768
1769	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1770	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1771	netdev->stats.rx_missed_errors = adapter->stats.mpc;
1772	netdev->stats.rx_over_errors = adapter->stats.mpc;
1773
1774	netdev->stats.tx_errors = 0;
1775	netdev->stats.rx_frame_errors = 0;
1776	netdev->stats.tx_aborted_errors = 0;
1777	netdev->stats.tx_carrier_errors = 0;
1778	netdev->stats.tx_fifo_errors = 0;
1779	netdev->stats.tx_heartbeat_errors = 0;
1780	netdev->stats.tx_window_errors = 0;
1781}
1782
1783#define IXGB_MAX_INTR 10
1784/**
1785 * ixgb_intr - Interrupt Handler
1786 * @irq: interrupt number
1787 * @data: pointer to a network interface device structure
1788 **/
1789
1790static irqreturn_t
1791ixgb_intr(int irq, void *data)
1792{
1793	struct net_device *netdev = data;
1794	struct ixgb_adapter *adapter = netdev_priv(netdev);
1795	struct ixgb_hw *hw = &adapter->hw;
1796	u32 icr = IXGB_READ_REG(hw, ICR);
1797
1798	if (unlikely(!icr))
1799		return IRQ_NONE;  /* Not our interrupt */
1800
1801	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1802		if (!test_bit(__IXGB_DOWN, &adapter->flags))
1803			mod_timer(&adapter->watchdog_timer, jiffies);
1804
1805	if (napi_schedule_prep(&adapter->napi)) {
1806
1807		/* Disable interrupts and register for poll. The flush
1808		  of the posted write is intentionally left out.
1809		*/
1810
1811		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1812		__napi_schedule(&adapter->napi);
1813	}
1814	return IRQ_HANDLED;
1815}
1816
1817/**
1818 * ixgb_clean - NAPI Rx polling callback
1819 * @adapter: board private structure
1820 **/
1821
1822static int
1823ixgb_clean(struct napi_struct *napi, int budget)
1824{
1825	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1826	int work_done = 0;
1827
1828	ixgb_clean_tx_irq(adapter);
1829	ixgb_clean_rx_irq(adapter, &work_done, budget);
1830
1831	/* If budget not fully consumed, exit the polling mode */
1832	if (work_done < budget) {
1833		napi_complete(napi);
1834		if (!test_bit(__IXGB_DOWN, &adapter->flags))
1835			ixgb_irq_enable(adapter);
1836	}
1837
1838	return work_done;
1839}
1840
1841/**
1842 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1843 * @adapter: board private structure
1844 **/
1845
1846static bool
1847ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1848{
1849	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1850	struct net_device *netdev = adapter->netdev;
1851	struct ixgb_tx_desc *tx_desc, *eop_desc;
1852	struct ixgb_buffer *buffer_info;
1853	unsigned int i, eop;
1854	bool cleaned = false;
1855
1856	i = tx_ring->next_to_clean;
1857	eop = tx_ring->buffer_info[i].next_to_watch;
1858	eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1859
1860	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1861
1862		rmb(); /* read buffer_info after eop_desc */
1863		for (cleaned = false; !cleaned; ) {
1864			tx_desc = IXGB_TX_DESC(*tx_ring, i);
1865			buffer_info = &tx_ring->buffer_info[i];
1866
1867			if (tx_desc->popts &
1868			   (IXGB_TX_DESC_POPTS_TXSM |
1869			    IXGB_TX_DESC_POPTS_IXSM))
1870				adapter->hw_csum_tx_good++;
1871
1872			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1873
1874			*(u32 *)&(tx_desc->status) = 0;
1875
1876			cleaned = (i == eop);
1877			if (++i == tx_ring->count) i = 0;
1878		}
1879
1880		eop = tx_ring->buffer_info[i].next_to_watch;
1881		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1882	}
1883
1884	tx_ring->next_to_clean = i;
1885
1886	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1887		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1888		/* Make sure that anybody stopping the queue after this
1889		 * sees the new next_to_clean. */
1890		smp_mb();
1891
1892		if (netif_queue_stopped(netdev) &&
1893		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1894			netif_wake_queue(netdev);
1895			++adapter->restart_queue;
1896		}
1897	}
1898
1899	if (adapter->detect_tx_hung) {
1900		/* detect a transmit hang in hardware, this serializes the
1901		 * check with the clearing of time_stamp and movement of i */
1902		adapter->detect_tx_hung = false;
1903		if (tx_ring->buffer_info[eop].time_stamp &&
1904		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1905		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1906		        IXGB_STATUS_TXOFF)) {
1907			/* detected Tx unit hang */
1908			netif_err(adapter, drv, adapter->netdev,
1909				  "Detected Tx Unit Hang\n"
1910				  "  TDH                  <%x>\n"
1911				  "  TDT                  <%x>\n"
1912				  "  next_to_use          <%x>\n"
1913				  "  next_to_clean        <%x>\n"
1914				  "buffer_info[next_to_clean]\n"
1915				  "  time_stamp           <%lx>\n"
1916				  "  next_to_watch        <%x>\n"
1917				  "  jiffies              <%lx>\n"
1918				  "  next_to_watch.status <%x>\n",
1919				  IXGB_READ_REG(&adapter->hw, TDH),
1920				  IXGB_READ_REG(&adapter->hw, TDT),
1921				  tx_ring->next_to_use,
1922				  tx_ring->next_to_clean,
1923				  tx_ring->buffer_info[eop].time_stamp,
1924				  eop,
1925				  jiffies,
1926				  eop_desc->status);
1927			netif_stop_queue(netdev);
1928		}
1929	}
1930
1931	return cleaned;
1932}
1933
1934/**
1935 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1936 * @adapter: board private structure
1937 * @rx_desc: receive descriptor
1938 * @sk_buff: socket buffer with received data
1939 **/
1940
1941static void
1942ixgb_rx_checksum(struct ixgb_adapter *adapter,
1943                 struct ixgb_rx_desc *rx_desc,
1944                 struct sk_buff *skb)
1945{
1946	/* Ignore Checksum bit is set OR
1947	 * TCP Checksum has not been calculated
1948	 */
1949	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1950	   (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1951		skb_checksum_none_assert(skb);
1952		return;
1953	}
1954
1955	/* At this point we know the hardware did the TCP checksum */
1956	/* now look at the TCP checksum error bit */
1957	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1958		/* let the stack verify checksum errors */
1959		skb_checksum_none_assert(skb);
1960		adapter->hw_csum_rx_error++;
1961	} else {
1962		/* TCP checksum is good */
1963		skb->ip_summed = CHECKSUM_UNNECESSARY;
1964		adapter->hw_csum_rx_good++;
1965	}
1966}
1967
1968/*
1969 * this should improve performance for small packets with large amounts
1970 * of reassembly being done in the stack
1971 */
1972static void ixgb_check_copybreak(struct net_device *netdev,
1973				 struct ixgb_buffer *buffer_info,
1974				 u32 length, struct sk_buff **skb)
1975{
1976	struct sk_buff *new_skb;
1977
1978	if (length > copybreak)
1979		return;
1980
1981	new_skb = netdev_alloc_skb_ip_align(netdev, length);
1982	if (!new_skb)
1983		return;
1984
1985	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1986				       (*skb)->data - NET_IP_ALIGN,
1987				       length + NET_IP_ALIGN);
1988	/* save the skb in buffer_info as good */
1989	buffer_info->skb = *skb;
1990	*skb = new_skb;
1991}
1992
1993/**
1994 * ixgb_clean_rx_irq - Send received data up the network stack,
1995 * @adapter: board private structure
1996 **/
1997
1998static bool
1999ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
2000{
2001	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2002	struct net_device *netdev = adapter->netdev;
2003	struct pci_dev *pdev = adapter->pdev;
2004	struct ixgb_rx_desc *rx_desc, *next_rxd;
2005	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
2006	u32 length;
2007	unsigned int i, j;
2008	int cleaned_count = 0;
2009	bool cleaned = false;
2010
2011	i = rx_ring->next_to_clean;
2012	rx_desc = IXGB_RX_DESC(*rx_ring, i);
2013	buffer_info = &rx_ring->buffer_info[i];
2014
2015	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
2016		struct sk_buff *skb;
2017		u8 status;
2018
2019		if (*work_done >= work_to_do)
2020			break;
2021
2022		(*work_done)++;
2023		rmb();	/* read descriptor and rx_buffer_info after status DD */
2024		status = rx_desc->status;
2025		skb = buffer_info->skb;
2026		buffer_info->skb = NULL;
2027
2028		prefetch(skb->data - NET_IP_ALIGN);
2029
2030		if (++i == rx_ring->count)
2031			i = 0;
2032		next_rxd = IXGB_RX_DESC(*rx_ring, i);
2033		prefetch(next_rxd);
2034
2035		j = i + 1;
2036		if (j == rx_ring->count)
2037			j = 0;
2038		next2_buffer = &rx_ring->buffer_info[j];
2039		prefetch(next2_buffer);
2040
2041		next_buffer = &rx_ring->buffer_info[i];
2042
2043		cleaned = true;
2044		cleaned_count++;
2045
2046		dma_unmap_single(&pdev->dev,
2047				 buffer_info->dma,
2048				 buffer_info->length,
2049				 DMA_FROM_DEVICE);
2050		buffer_info->dma = 0;
2051
2052		length = le16_to_cpu(rx_desc->length);
2053		rx_desc->length = 0;
2054
2055		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
2056
2057			/* All receives must fit into a single buffer */
2058
2059			pr_debug("Receive packet consumed multiple buffers length<%x>\n",
2060				 length);
2061
2062			dev_kfree_skb_irq(skb);
2063			goto rxdesc_done;
2064		}
2065
2066		if (unlikely(rx_desc->errors &
2067		    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2068		     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2069			dev_kfree_skb_irq(skb);
2070			goto rxdesc_done;
2071		}
2072
2073		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
2074
2075		/* Good Receive */
2076		skb_put(skb, length);
2077
2078		/* Receive Checksum Offload */
2079		ixgb_rx_checksum(adapter, rx_desc, skb);
2080
2081		skb->protocol = eth_type_trans(skb, netdev);
2082		if (status & IXGB_RX_DESC_STATUS_VP)
2083			__vlan_hwaccel_put_tag(skb,
2084					       le16_to_cpu(rx_desc->special));
2085
2086		netif_receive_skb(skb);
2087
2088rxdesc_done:
2089		/* clean up descriptor, might be written over by hw */
2090		rx_desc->status = 0;
2091
2092		/* return some buffers to hardware, one at a time is too slow */
2093		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2094			ixgb_alloc_rx_buffers(adapter, cleaned_count);
2095			cleaned_count = 0;
2096		}
2097
2098		/* use prefetched values */
2099		rx_desc = next_rxd;
2100		buffer_info = next_buffer;
2101	}
2102
2103	rx_ring->next_to_clean = i;
2104
2105	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2106	if (cleaned_count)
2107		ixgb_alloc_rx_buffers(adapter, cleaned_count);
2108
2109	return cleaned;
2110}
2111
2112/**
2113 * ixgb_alloc_rx_buffers - Replace used receive buffers
2114 * @adapter: address of board private structure
2115 **/
2116
2117static void
2118ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2119{
2120	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2121	struct net_device *netdev = adapter->netdev;
2122	struct pci_dev *pdev = adapter->pdev;
2123	struct ixgb_rx_desc *rx_desc;
2124	struct ixgb_buffer *buffer_info;
2125	struct sk_buff *skb;
2126	unsigned int i;
2127	long cleancount;
2128
2129	i = rx_ring->next_to_use;
2130	buffer_info = &rx_ring->buffer_info[i];
2131	cleancount = IXGB_DESC_UNUSED(rx_ring);
2132
2133
2134	/* leave three descriptors unused */
2135	while (--cleancount > 2 && cleaned_count--) {
2136		/* recycle! its good for you */
2137		skb = buffer_info->skb;
2138		if (skb) {
2139			skb_trim(skb, 0);
2140			goto map_skb;
2141		}
2142
2143		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2144		if (unlikely(!skb)) {
2145			/* Better luck next round */
2146			adapter->alloc_rx_buff_failed++;
2147			break;
2148		}
2149
2150		buffer_info->skb = skb;
2151		buffer_info->length = adapter->rx_buffer_len;
2152map_skb:
2153		buffer_info->dma = dma_map_single(&pdev->dev,
2154		                                  skb->data,
2155		                                  adapter->rx_buffer_len,
2156						  DMA_FROM_DEVICE);
2157		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
2158			adapter->alloc_rx_buff_failed++;
2159			break;
2160		}
2161
2162		rx_desc = IXGB_RX_DESC(*rx_ring, i);
2163		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2164		/* guarantee DD bit not set now before h/w gets descriptor
2165		 * this is the rest of the workaround for h/w double
2166		 * writeback. */
2167		rx_desc->status = 0;
2168
2169
2170		if (++i == rx_ring->count)
2171			i = 0;
2172		buffer_info = &rx_ring->buffer_info[i];
2173	}
2174
2175	if (likely(rx_ring->next_to_use != i)) {
2176		rx_ring->next_to_use = i;
2177		if (unlikely(i-- == 0))
2178			i = (rx_ring->count - 1);
2179
2180		/* Force memory writes to complete before letting h/w
2181		 * know there are new descriptors to fetch.  (Only
2182		 * applicable for weak-ordered memory model archs, such
2183		 * as IA-64). */
2184		wmb();
2185		IXGB_WRITE_REG(&adapter->hw, RDT, i);
2186	}
2187}
2188
2189static void
2190ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2191{
2192	u32 ctrl;
2193
2194	/* enable VLAN tag insert/strip */
2195	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2196	ctrl |= IXGB_CTRL0_VME;
2197	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2198}
2199
2200static void
2201ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2202{
2203	u32 ctrl;
2204
2205	/* disable VLAN tag insert/strip */
2206	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2207	ctrl &= ~IXGB_CTRL0_VME;
2208	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2209}
2210
2211static int
2212ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2213{
2214	struct ixgb_adapter *adapter = netdev_priv(netdev);
2215	u32 vfta, index;
2216
2217	/* add VID to filter table */
2218
2219	index = (vid >> 5) & 0x7F;
2220	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2221	vfta |= (1 << (vid & 0x1F));
2222	ixgb_write_vfta(&adapter->hw, index, vfta);
2223	set_bit(vid, adapter->active_vlans);
2224
2225	return 0;
2226}
2227
2228static int
2229ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2230{
2231	struct ixgb_adapter *adapter = netdev_priv(netdev);
2232	u32 vfta, index;
2233
2234	/* remove VID from filter table */
2235
2236	index = (vid >> 5) & 0x7F;
2237	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2238	vfta &= ~(1 << (vid & 0x1F));
2239	ixgb_write_vfta(&adapter->hw, index, vfta);
2240	clear_bit(vid, adapter->active_vlans);
2241
2242	return 0;
2243}
2244
2245static void
2246ixgb_restore_vlan(struct ixgb_adapter *adapter)
2247{
2248	u16 vid;
2249
2250	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2251		ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2252}
2253
2254#ifdef CONFIG_NET_POLL_CONTROLLER
2255/*
2256 * Polling 'interrupt' - used by things like netconsole to send skbs
2257 * without having to re-enable interrupts. It's not called while
2258 * the interrupt routine is executing.
2259 */
2260
2261static void ixgb_netpoll(struct net_device *dev)
2262{
2263	struct ixgb_adapter *adapter = netdev_priv(dev);
2264
2265	disable_irq(adapter->pdev->irq);
2266	ixgb_intr(adapter->pdev->irq, dev);
2267	enable_irq(adapter->pdev->irq);
2268}
2269#endif
2270
2271/**
2272 * ixgb_io_error_detected - called when PCI error is detected
2273 * @pdev:    pointer to pci device with error
2274 * @state:   pci channel state after error
2275 *
2276 * This callback is called by the PCI subsystem whenever
2277 * a PCI bus error is detected.
2278 */
2279static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2280                                               enum pci_channel_state state)
2281{
2282	struct net_device *netdev = pci_get_drvdata(pdev);
2283	struct ixgb_adapter *adapter = netdev_priv(netdev);
2284
2285	netif_device_detach(netdev);
2286
2287	if (state == pci_channel_io_perm_failure)
2288		return PCI_ERS_RESULT_DISCONNECT;
2289
2290	if (netif_running(netdev))
2291		ixgb_down(adapter, true);
2292
2293	pci_disable_device(pdev);
2294
2295	/* Request a slot reset. */
2296	return PCI_ERS_RESULT_NEED_RESET;
2297}
2298
2299/**
2300 * ixgb_io_slot_reset - called after the pci bus has been reset.
2301 * @pdev    pointer to pci device with error
2302 *
2303 * This callback is called after the PCI bus has been reset.
2304 * Basically, this tries to restart the card from scratch.
2305 * This is a shortened version of the device probe/discovery code,
2306 * it resembles the first-half of the ixgb_probe() routine.
2307 */
2308static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2309{
2310	struct net_device *netdev = pci_get_drvdata(pdev);
2311	struct ixgb_adapter *adapter = netdev_priv(netdev);
2312
2313	if (pci_enable_device(pdev)) {
2314		netif_err(adapter, probe, adapter->netdev,
2315			  "Cannot re-enable PCI device after reset\n");
2316		return PCI_ERS_RESULT_DISCONNECT;
2317	}
2318
2319	/* Perform card reset only on one instance of the card */
2320	if (0 != PCI_FUNC (pdev->devfn))
2321		return PCI_ERS_RESULT_RECOVERED;
2322
2323	pci_set_master(pdev);
2324
2325	netif_carrier_off(netdev);
2326	netif_stop_queue(netdev);
2327	ixgb_reset(adapter);
2328
2329	/* Make sure the EEPROM is good */
2330	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2331		netif_err(adapter, probe, adapter->netdev,
2332			  "After reset, the EEPROM checksum is not valid\n");
2333		return PCI_ERS_RESULT_DISCONNECT;
2334	}
2335	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2336	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2337
2338	if (!is_valid_ether_addr(netdev->perm_addr)) {
2339		netif_err(adapter, probe, adapter->netdev,
2340			  "After reset, invalid MAC address\n");
2341		return PCI_ERS_RESULT_DISCONNECT;
2342	}
2343
2344	return PCI_ERS_RESULT_RECOVERED;
2345}
2346
2347/**
2348 * ixgb_io_resume - called when its OK to resume normal operations
2349 * @pdev    pointer to pci device with error
2350 *
2351 * The error recovery driver tells us that its OK to resume
2352 * normal operation. Implementation resembles the second-half
2353 * of the ixgb_probe() routine.
2354 */
2355static void ixgb_io_resume(struct pci_dev *pdev)
2356{
2357	struct net_device *netdev = pci_get_drvdata(pdev);
2358	struct ixgb_adapter *adapter = netdev_priv(netdev);
2359
2360	pci_set_master(pdev);
2361
2362	if (netif_running(netdev)) {
2363		if (ixgb_up(adapter)) {
2364			pr_err("can't bring device back up after reset\n");
2365			return;
2366		}
2367	}
2368
2369	netif_device_attach(netdev);
2370	mod_timer(&adapter->watchdog_timer, jiffies);
2371}
2372
2373/* ixgb_main.c */
2374