ixgbevf_main.c revision bc0c715167c68ac2e737e221a80fc2a413f48155
1/*******************************************************************************
2
3  Intel 82599 Virtual Function driver
4  Copyright(c) 1999 - 2014 Intel Corporation.
5
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  more details.
14
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35#include <linux/types.h>
36#include <linux/bitops.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/vmalloc.h>
41#include <linux/string.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/tcp.h>
45#include <linux/sctp.h>
46#include <linux/ipv6.h>
47#include <linux/slab.h>
48#include <net/checksum.h>
49#include <net/ip6_checksum.h>
50#include <linux/ethtool.h>
51#include <linux/if.h>
52#include <linux/if_vlan.h>
53#include <linux/prefetch.h>
54
55#include "ixgbevf.h"
56
57const char ixgbevf_driver_name[] = "ixgbevf";
58static const char ixgbevf_driver_string[] =
59	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60
61#define DRV_VERSION "2.12.1-k"
62const char ixgbevf_driver_version[] = DRV_VERSION;
63static char ixgbevf_copyright[] =
64	"Copyright (c) 2009 - 2012 Intel Corporation.";
65
66static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67	[board_82599_vf] = &ixgbevf_82599_vf_info,
68	[board_X540_vf]  = &ixgbevf_X540_vf_info,
69};
70
71/* ixgbevf_pci_tbl - PCI Device ID Table
72 *
73 * Wildcard entries (PCI_ANY_ID) should come last
74 * Last entry must be all 0s
75 *
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 *   Class, Class Mask, private data (not used) }
78 */
79static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82	/* required last entry */
83	{0, }
84};
85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION);
91
92#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93static int debug = -1;
94module_param(debug, int, 0);
95MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
96
97/* forward decls */
98static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
99static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
101
102static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
103{
104	struct ixgbevf_adapter *adapter = hw->back;
105
106	if (!hw->hw_addr)
107		return;
108	hw->hw_addr = NULL;
109	dev_err(&adapter->pdev->dev, "Adapter removed\n");
110	schedule_work(&adapter->watchdog_task);
111}
112
113static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
114{
115	u32 value;
116
117	/* The following check not only avoids a redundant read of
118	 * the status register when the register just read was
119	 * itself the status register and returned
120	 * IXGBE_FAILED_READ_REG, it also blocks any potential
121	 * recursion.
122	 */
123	if (reg == IXGBE_VFSTATUS) {
124		ixgbevf_remove_adapter(hw);
125		return;
126	}
127	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
128	if (value == IXGBE_FAILED_READ_REG)
129		ixgbevf_remove_adapter(hw);
130}
131
132u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
133{
134	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
135	u32 value;
136
137	if (IXGBE_REMOVED(reg_addr))
138		return IXGBE_FAILED_READ_REG;
139	value = readl(reg_addr + reg);
140	if (unlikely(value == IXGBE_FAILED_READ_REG))
141		ixgbevf_check_remove(hw, reg);
142	return value;
143}
144
145static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
146					   u32 val)
147{
148	rx_ring->next_to_use = val;
149
150	/*
151	 * Force memory writes to complete before letting h/w
152	 * know there are new descriptors to fetch.  (Only
153	 * applicable for weak-ordered memory model archs,
154	 * such as IA-64).
155	 */
156	wmb();
157	ixgbevf_write_tail(rx_ring, val);
158}
159
160/**
161 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
162 * @adapter: pointer to adapter struct
163 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
164 * @queue: queue to map the corresponding interrupt to
165 * @msix_vector: the vector to map to the corresponding queue
166 */
167static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
168			     u8 queue, u8 msix_vector)
169{
170	u32 ivar, index;
171	struct ixgbe_hw *hw = &adapter->hw;
172	if (direction == -1) {
173		/* other causes */
174		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
175		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
176		ivar &= ~0xFF;
177		ivar |= msix_vector;
178		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
179	} else {
180		/* tx or rx causes */
181		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
182		index = ((16 * (queue & 1)) + (8 * direction));
183		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
184		ivar &= ~(0xFF << index);
185		ivar |= (msix_vector << index);
186		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
187	}
188}
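/* Illustration of the IVAR index math above: for Rx on queue 5
 * (direction 0) the vector lands in VTIVAR(5 >> 1) = VTIVAR(2) at bit
 * offset 16 * (5 & 1) + 8 * 0 = 16, i.e. bits 23:16 of that register,
 * while Tx on the same queue (direction 1) uses bits 31:24.  Each
 * VTIVAR register therefore holds the Rx and Tx mappings for a pair of
 * queues.
 */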
189
190static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
191					struct ixgbevf_tx_buffer *tx_buffer)
192{
193	if (tx_buffer->skb) {
194		dev_kfree_skb_any(tx_buffer->skb);
195		if (dma_unmap_len(tx_buffer, len))
196			dma_unmap_single(tx_ring->dev,
197					 dma_unmap_addr(tx_buffer, dma),
198					 dma_unmap_len(tx_buffer, len),
199					 DMA_TO_DEVICE);
200	} else if (dma_unmap_len(tx_buffer, len)) {
201		dma_unmap_page(tx_ring->dev,
202			       dma_unmap_addr(tx_buffer, dma),
203			       dma_unmap_len(tx_buffer, len),
204			       DMA_TO_DEVICE);
205	}
206	tx_buffer->next_to_watch = NULL;
207	tx_buffer->skb = NULL;
208	dma_unmap_len_set(tx_buffer, len, 0);
209	/* tx_buffer must be completely set up in the transmit path */
210}
211
212#define IXGBE_MAX_TXD_PWR	14
213#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
214
215/* Tx Descriptors needed, worst case */
216#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
217#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
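/* For example, a single 20000-byte buffer costs
 * TXD_USE_COUNT(20000) = DIV_ROUND_UP(20000, 16384) = 2 descriptors.
 * DESC_NEEDED budgets one descriptor per possible fragment plus a few
 * spare slots (the 4 presumably covering the context descriptor, the
 * skb head and some slack) and is used below as the basis for the Tx
 * queue wake threshold.
 */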
218
219static void ixgbevf_tx_timeout(struct net_device *netdev);
220
221/**
222 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
223 * @q_vector: structure containing interrupt and ring information
224 * @tx_ring: tx ring to clean
225 **/
226static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
227				 struct ixgbevf_ring *tx_ring)
228{
229	struct ixgbevf_adapter *adapter = q_vector->adapter;
230	struct ixgbevf_tx_buffer *tx_buffer;
231	union ixgbe_adv_tx_desc *tx_desc;
232	unsigned int total_bytes = 0, total_packets = 0;
233	unsigned int budget = tx_ring->count / 2;
234	unsigned int i = tx_ring->next_to_clean;
235
236	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
237		return true;
238
239	tx_buffer = &tx_ring->tx_buffer_info[i];
240	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
241	i -= tx_ring->count;
242
243	do {
244		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
245
246		/* if next_to_watch is not set then there is no work pending */
247		if (!eop_desc)
248			break;
249
250		/* prevent any other reads prior to eop_desc */
251		read_barrier_depends();
252
253		/* if DD is not set pending work has not been completed */
254		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
255			break;
256
257		/* clear next_to_watch to prevent false hangs */
258		tx_buffer->next_to_watch = NULL;
259
260		/* update the statistics for this packet */
261		total_bytes += tx_buffer->bytecount;
262		total_packets += tx_buffer->gso_segs;
263
264		/* free the skb */
265		dev_kfree_skb_any(tx_buffer->skb);
266
267		/* unmap skb header data */
268		dma_unmap_single(tx_ring->dev,
269				 dma_unmap_addr(tx_buffer, dma),
270				 dma_unmap_len(tx_buffer, len),
271				 DMA_TO_DEVICE);
272
273		/* clear tx_buffer data */
274		tx_buffer->skb = NULL;
275		dma_unmap_len_set(tx_buffer, len, 0);
276
277		/* unmap remaining buffers */
278		while (tx_desc != eop_desc) {
279			tx_buffer++;
280			tx_desc++;
281			i++;
282			if (unlikely(!i)) {
283				i -= tx_ring->count;
284				tx_buffer = tx_ring->tx_buffer_info;
285				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
286			}
287
288			/* unmap any remaining paged data */
289			if (dma_unmap_len(tx_buffer, len)) {
290				dma_unmap_page(tx_ring->dev,
291					       dma_unmap_addr(tx_buffer, dma),
292					       dma_unmap_len(tx_buffer, len),
293					       DMA_TO_DEVICE);
294				dma_unmap_len_set(tx_buffer, len, 0);
295			}
296		}
297
298		/* move us one more past the eop_desc for start of next pkt */
299		tx_buffer++;
300		tx_desc++;
301		i++;
302		if (unlikely(!i)) {
303			i -= tx_ring->count;
304			tx_buffer = tx_ring->tx_buffer_info;
305			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
306		}
307
308		/* issue prefetch for next Tx descriptor */
309		prefetch(tx_desc);
310
311		/* update budget accounting */
312		budget--;
313	} while (likely(budget));
314
315	i += tx_ring->count;
316	tx_ring->next_to_clean = i;
317	u64_stats_update_begin(&tx_ring->syncp);
318	tx_ring->stats.bytes += total_bytes;
319	tx_ring->stats.packets += total_packets;
320	u64_stats_update_end(&tx_ring->syncp);
321	q_vector->tx.total_bytes += total_bytes;
322	q_vector->tx.total_packets += total_packets;
323
324#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
325	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
326		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
327		/* Make sure that anybody stopping the queue after this
328		 * sees the new next_to_clean.
329		 */
330		smp_mb();
331
332		if (__netif_subqueue_stopped(tx_ring->netdev,
333					     tx_ring->queue_index) &&
334		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
335			netif_wake_subqueue(tx_ring->netdev,
336					    tx_ring->queue_index);
337			++tx_ring->tx_stats.restart_queue;
338		}
339	}
340
341	return !!budget;
342}
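/* The return value above is !!budget: true when the ring was cleaned
 * within its allowance of tx_ring->count / 2 descriptors, which
 * ixgbevf_poll() later folds into clean_complete to decide whether to
 * stay in polling mode.  Waking the queue only once TX_WAKE_THRESHOLD
 * (2 * DESC_NEEDED) descriptors are free keeps it from toggling on and
 * off for every few completed descriptors.
 */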
343
344/**
345 * ixgbevf_receive_skb - Send a completed packet up the stack
346 * @q_vector: structure containing interrupt and ring information
347 * @skb: packet to send up
348 * @status: hardware indication of status of receive
349 * @rx_desc: rx descriptor
350 **/
351static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
352				struct sk_buff *skb, u8 status,
353				union ixgbe_adv_rx_desc *rx_desc)
354{
355	struct ixgbevf_adapter *adapter = q_vector->adapter;
356	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
357	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
358
359	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
360		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
361
362	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
363		napi_gro_receive(&q_vector->napi, skb);
364	else
365		netif_rx(skb);
366}
367
368/**
369 * ixgbevf_rx_skb - Helper function to determine proper Rx method
370 * @q_vector: structure containing interrupt and ring information
371 * @skb: packet to send up
372 * @status: hardware indication of status of receive
373 * @rx_desc: rx descriptor
374 **/
375static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
376			   struct sk_buff *skb, u8 status,
377			   union ixgbe_adv_rx_desc *rx_desc)
378{
379#ifdef CONFIG_NET_RX_BUSY_POLL
380	skb_mark_napi_id(skb, &q_vector->napi);
381
382	if (ixgbevf_qv_busy_polling(q_vector)) {
383		netif_receive_skb(skb);
384		/* exit early if we busy polled */
385		return;
386	}
387#endif /* CONFIG_NET_RX_BUSY_POLL */
388
389	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
390}
391
392/**
393 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
394 * @ring: pointer to Rx descriptor ring structure
395 * @status_err: hardware indication of status of receive
396 * @skb: skb currently being received and modified
397 **/
398static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
399				       u32 status_err, struct sk_buff *skb)
400{
401	skb_checksum_none_assert(skb);
402
403	/* Rx csum disabled */
404	if (!(ring->netdev->features & NETIF_F_RXCSUM))
405		return;
406
407	/* if IP and error */
408	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
409	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
410		ring->rx_stats.csum_err++;
411		return;
412	}
413
414	if (!(status_err & IXGBE_RXD_STAT_L4CS))
415		return;
416
417	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
418		ring->rx_stats.csum_err++;
419		return;
420	}
421
422	/* It must be a TCP or UDP packet with a valid checksum */
423	skb->ip_summed = CHECKSUM_UNNECESSARY;
424}
425
426/**
427 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
428 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to allocate
429 **/
430static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
431				     int cleaned_count)
432{
433	union ixgbe_adv_rx_desc *rx_desc;
434	struct ixgbevf_rx_buffer *bi;
435	unsigned int i = rx_ring->next_to_use;
436
437	while (cleaned_count--) {
438		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
439		bi = &rx_ring->rx_buffer_info[i];
440
441		if (!bi->skb) {
442			struct sk_buff *skb;
443
444			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
445							rx_ring->rx_buf_len);
446			if (!skb)
447				goto no_buffers;
448
449			bi->skb = skb;
450
451			bi->dma = dma_map_single(rx_ring->dev, skb->data,
452						 rx_ring->rx_buf_len,
453						 DMA_FROM_DEVICE);
454			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
455				dev_kfree_skb(skb);
456				bi->skb = NULL;
457				dev_err(rx_ring->dev, "Rx DMA map failed\n");
458				break;
459			}
460		}
461		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
462
463		i++;
464		if (i == rx_ring->count)
465			i = 0;
466	}
467
468no_buffers:
469	rx_ring->rx_stats.alloc_rx_buff_failed++;
470	if (rx_ring->next_to_use != i)
471		ixgbevf_release_rx_desc(rx_ring, i);
472}
473
474static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
475					     u32 qmask)
476{
477	struct ixgbe_hw *hw = &adapter->hw;
478
479	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
480}
481
482static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
483				struct ixgbevf_ring *rx_ring,
484				int budget)
485{
486	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
487	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
488	struct sk_buff *skb;
489	unsigned int i;
490	u32 len, staterr;
491	int cleaned_count = 0;
492	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
493
494	i = rx_ring->next_to_clean;
495	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
496	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
497	rx_buffer_info = &rx_ring->rx_buffer_info[i];
498
499	while (staterr & IXGBE_RXD_STAT_DD) {
500		if (!budget)
501			break;
502		budget--;
503
504		rmb(); /* read descriptor and rx_buffer_info after status DD */
505		len = le16_to_cpu(rx_desc->wb.upper.length);
506		skb = rx_buffer_info->skb;
507		prefetch(skb->data - NET_IP_ALIGN);
508		rx_buffer_info->skb = NULL;
509
510		if (rx_buffer_info->dma) {
511			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
512					 rx_ring->rx_buf_len,
513					 DMA_FROM_DEVICE);
514			rx_buffer_info->dma = 0;
515			skb_put(skb, len);
516		}
517
518		i++;
519		if (i == rx_ring->count)
520			i = 0;
521
522		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
523		prefetch(next_rxd);
524		cleaned_count++;
525
526		next_buffer = &rx_ring->rx_buffer_info[i];
527
528		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
529			skb->next = next_buffer->skb;
530			IXGBE_CB(skb->next)->prev = skb;
531			rx_ring->rx_stats.non_eop_descs++;
532			goto next_desc;
533		}
534
535		/* we should not be chaining buffers, if we did drop the skb */
536		/* we should not be chaining buffers; if we did, drop the skb */
537			do {
538				struct sk_buff *this = skb;
539				skb = IXGBE_CB(skb)->prev;
540				dev_kfree_skb(this);
541			} while (skb);
542			goto next_desc;
543		}
544
545		/* ERR_MASK will only have valid bits if EOP set */
546		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
547			dev_kfree_skb_irq(skb);
548			goto next_desc;
549		}
550
551		ixgbevf_rx_checksum(rx_ring, staterr, skb);
552
553		/* probably a little skewed due to removing CRC */
554		total_rx_bytes += skb->len;
555		total_rx_packets++;
556
557		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
558
559		/* Workaround hardware that can't do proper VEPA multicast
560		 * source pruning.
561		 */
562		if ((skb->pkt_type == PACKET_BROADCAST ||
563		    skb->pkt_type == PACKET_MULTICAST) &&
564		    ether_addr_equal(rx_ring->netdev->dev_addr,
565				     eth_hdr(skb)->h_source)) {
566			dev_kfree_skb_irq(skb);
567			goto next_desc;
568		}
569
570		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
571
572next_desc:
573		rx_desc->wb.upper.status_error = 0;
574
575		/* return some buffers to hardware, one at a time is too slow */
576		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
577			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
578			cleaned_count = 0;
579		}
580
581		/* use prefetched values */
582		rx_desc = next_rxd;
583		rx_buffer_info = &rx_ring->rx_buffer_info[i];
584
585		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
586	}
587
588	rx_ring->next_to_clean = i;
589	cleaned_count = ixgbevf_desc_unused(rx_ring);
590
591	if (cleaned_count)
592		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
593
594	u64_stats_update_begin(&rx_ring->syncp);
595	rx_ring->stats.packets += total_rx_packets;
596	rx_ring->stats.bytes += total_rx_bytes;
597	u64_stats_update_end(&rx_ring->syncp);
598	q_vector->rx.total_packets += total_rx_packets;
599	q_vector->rx.total_bytes += total_rx_bytes;
600
601	return total_rx_packets;
602}
603
604/**
605 * ixgbevf_poll - NAPI polling callback
606 * @napi: napi struct with our devices info in it
607 * @budget: amount of work driver is allowed to do this pass, in packets
608 *
609 * This function will clean one or more rings associated with a
610 * q_vector.
611 **/
612static int ixgbevf_poll(struct napi_struct *napi, int budget)
613{
614	struct ixgbevf_q_vector *q_vector =
615		container_of(napi, struct ixgbevf_q_vector, napi);
616	struct ixgbevf_adapter *adapter = q_vector->adapter;
617	struct ixgbevf_ring *ring;
618	int per_ring_budget;
619	bool clean_complete = true;
620
621	ixgbevf_for_each_ring(ring, q_vector->tx)
622		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
623
624#ifdef CONFIG_NET_RX_BUSY_POLL
625	if (!ixgbevf_qv_lock_napi(q_vector))
626		return budget;
627#endif
628
629	/* attempt to distribute budget to each queue fairly, but don't allow
630	 * the budget to go below 1 because we'll exit polling */
631	if (q_vector->rx.count > 1)
632		per_ring_budget = max(budget/q_vector->rx.count, 1);
633	else
634		per_ring_budget = budget;
635
636	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
637	ixgbevf_for_each_ring(ring, q_vector->rx)
638		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
639							per_ring_budget)
640				   < per_ring_budget);
641	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
642
643#ifdef CONFIG_NET_RX_BUSY_POLL
644	ixgbevf_qv_unlock_napi(q_vector);
645#endif
646
647	/* If all work not completed, return budget and keep polling */
648	if (!clean_complete)
649		return budget;
650	/* all work done, exit the polling mode */
651	napi_complete(napi);
652	if (adapter->rx_itr_setting & 1)
653		ixgbevf_set_itr(q_vector);
654	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
655	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
656		ixgbevf_irq_enable_queues(adapter,
657					  1 << q_vector->v_idx);
658
659	return 0;
660}
661
662/**
663 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
664 * @q_vector: structure containing interrupt and ring information
665 */
666void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
667{
668	struct ixgbevf_adapter *adapter = q_vector->adapter;
669	struct ixgbe_hw *hw = &adapter->hw;
670	int v_idx = q_vector->v_idx;
671	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
672
673	/*
674	 * set the WDIS bit to not clear the timer bits and cause an
675	 * immediate assertion of the interrupt
676	 */
677	itr_reg |= IXGBE_EITR_CNT_WDIS;
678
679	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
680}
681
682#ifdef CONFIG_NET_RX_BUSY_POLL
683/* must be called with local_bh_disable()d */
684static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
685{
686	struct ixgbevf_q_vector *q_vector =
687			container_of(napi, struct ixgbevf_q_vector, napi);
688	struct ixgbevf_adapter *adapter = q_vector->adapter;
689	struct ixgbevf_ring  *ring;
690	int found = 0;
691
692	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
693		return LL_FLUSH_FAILED;
694
695	if (!ixgbevf_qv_lock_poll(q_vector))
696		return LL_FLUSH_BUSY;
697
698	ixgbevf_for_each_ring(ring, q_vector->rx) {
699		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
700#ifdef BP_EXTENDED_STATS
701		if (found)
702			ring->stats.cleaned += found;
703		else
704			ring->stats.misses++;
705#endif
706		if (found)
707			break;
708	}
709
710	ixgbevf_qv_unlock_poll(q_vector);
711
712	return found;
713}
714#endif /* CONFIG_NET_RX_BUSY_POLL */
715
716/**
717 * ixgbevf_configure_msix - Configure MSI-X hardware
718 * @adapter: board private structure
719 *
720 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
721 * interrupts.
722 **/
723static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
724{
725	struct ixgbevf_q_vector *q_vector;
726	int q_vectors, v_idx;
727
728	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
729	adapter->eims_enable_mask = 0;
730
731	/*
732	 * Populate the IVAR table and set the ITR values to the
733	 * corresponding register.
734	 */
735	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
736		struct ixgbevf_ring *ring;
737		q_vector = adapter->q_vector[v_idx];
738
739		ixgbevf_for_each_ring(ring, q_vector->rx)
740			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
741
742		ixgbevf_for_each_ring(ring, q_vector->tx)
743			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
744
745		if (q_vector->tx.ring && !q_vector->rx.ring) {
746			/* tx only vector */
747			if (adapter->tx_itr_setting == 1)
748				q_vector->itr = IXGBE_10K_ITR;
749			else
750				q_vector->itr = adapter->tx_itr_setting;
751		} else {
752			/* rx or rx/tx vector */
753			if (adapter->rx_itr_setting == 1)
754				q_vector->itr = IXGBE_20K_ITR;
755			else
756				q_vector->itr = adapter->rx_itr_setting;
757		}
758
759		/* add q_vector eims value to global eims_enable_mask */
760		adapter->eims_enable_mask |= 1 << v_idx;
761
762		ixgbevf_write_eitr(q_vector);
763	}
764
765	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
766	/* setup eims_other and add value to global eims_enable_mask */
767	adapter->eims_other = 1 << v_idx;
768	adapter->eims_enable_mask |= adapter->eims_other;
769}
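/* Note: when the loop above finishes, v_idx == q_vectors, so the
 * "other causes" entry (mailbox/link events) is mapped to the last
 * MSI-X vector -- the same extra vector that ixgbevf_request_msix_irqs()
 * below registers with ixgbevf_msix_other().
 */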
770
771enum latency_range {
772	lowest_latency = 0,
773	low_latency = 1,
774	bulk_latency = 2,
775	latency_invalid = 255
776};
777
778/**
779 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
780 * @q_vector: structure containing interrupt and ring information
781 * @ring_container: structure containing ring performance data
782 *
783 *      Stores a new ITR value based on packets and byte
784 *      counts during the last interrupt.  The advantage of per interrupt
785 *      computation is faster updates and more accurate ITR for the current
786 *      traffic pattern.  Constants in this function were computed
787 *      based on theoretical maximum wire speed and thresholds were set based
788 *      on testing data as well as attempting to minimize response time
789 *      while increasing bulk throughput.
790 **/
791static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
792			       struct ixgbevf_ring_container *ring_container)
793{
794	int bytes = ring_container->total_bytes;
795	int packets = ring_container->total_packets;
796	u32 timepassed_us;
797	u64 bytes_perint;
798	u8 itr_setting = ring_container->itr;
799
800	if (packets == 0)
801		return;
802
803	/* simple throttlerate management
804	 *    0-20MB/s lowest (100000 ints/s)
805	 *   20-100MB/s low   (20000 ints/s)
806	 *  100-1249MB/s bulk (8000 ints/s)
807	 */
808	/* what was last interrupt timeslice? */
809	timepassed_us = q_vector->itr >> 2;
810	bytes_perint = bytes / timepassed_us; /* bytes/usec */
811
812	switch (itr_setting) {
813	case lowest_latency:
814		if (bytes_perint > 10)
815			itr_setting = low_latency;
816		break;
817	case low_latency:
818		if (bytes_perint > 20)
819			itr_setting = bulk_latency;
820		else if (bytes_perint <= 10)
821			itr_setting = lowest_latency;
822		break;
823	case bulk_latency:
824		if (bytes_perint <= 20)
825			itr_setting = low_latency;
826		break;
827	}
828
829	/* clear work counters since we have the values we need */
830	ring_container->total_bytes = 0;
831	ring_container->total_packets = 0;
832
833	/* write updated itr to ring container */
834	ring_container->itr = itr_setting;
835}
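/* Worked example of the classification above, assuming the 20K ints/s
 * setting encodes a 50 usec interval (so timepassed_us = itr >> 2 = 50):
 * receiving 2500 bytes in that window gives bytes_perint = 50, which is
 * > 20, so a ring sitting at low_latency is promoted to bulk_latency
 * (8K ints/s); 500 bytes gives bytes_perint = 10, which drops it back
 * to lowest_latency (100K ints/s).
 */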
836
837static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
838{
839	u32 new_itr = q_vector->itr;
840	u8 current_itr;
841
842	ixgbevf_update_itr(q_vector, &q_vector->tx);
843	ixgbevf_update_itr(q_vector, &q_vector->rx);
844
845	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
846
847	switch (current_itr) {
848	/* counts and packets in update_itr are dependent on these numbers */
849	case lowest_latency:
850		new_itr = IXGBE_100K_ITR;
851		break;
852	case low_latency:
853		new_itr = IXGBE_20K_ITR;
854		break;
855	case bulk_latency:
856	default:
857		new_itr = IXGBE_8K_ITR;
858		break;
859	}
860
861	if (new_itr != q_vector->itr) {
862		/* do an exponential smoothing */
863		new_itr = (10 * new_itr * q_vector->itr) /
864			  ((9 * new_itr) + q_vector->itr);
865
866		/* save the algorithm value here */
867		q_vector->itr = new_itr;
868
869		ixgbevf_write_eitr(q_vector);
870	}
871}
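/* The smoothing above is a weighted blend of the old and target values;
 * for instance, stepping from an itr of 200 toward a target of 500
 * yields (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 ~= 212,
 * so the interrupt rate moves gradually instead of jumping straight to
 * the new setting.
 */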
872
873static irqreturn_t ixgbevf_msix_other(int irq, void *data)
874{
875	struct ixgbevf_adapter *adapter = data;
876	struct ixgbe_hw *hw = &adapter->hw;
877
878	hw->mac.get_link_status = 1;
879
880	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
881	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
882		mod_timer(&adapter->watchdog_timer, jiffies);
883
884	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
885
886	return IRQ_HANDLED;
887}
888
889/**
890 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
891 * @irq: unused
892 * @data: pointer to our q_vector struct for this interrupt vector
893 **/
894static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
895{
896	struct ixgbevf_q_vector *q_vector = data;
897
898	/* EIAM disabled interrupts (on this vector) for us */
899	if (q_vector->rx.ring || q_vector->tx.ring)
900		napi_schedule(&q_vector->napi);
901
902	return IRQ_HANDLED;
903}
904
905static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
906				     int r_idx)
907{
908	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
909
910	a->rx_ring[r_idx]->next = q_vector->rx.ring;
911	q_vector->rx.ring = a->rx_ring[r_idx];
912	q_vector->rx.count++;
913}
914
915static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
916				     int t_idx)
917{
918	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
919
920	a->tx_ring[t_idx]->next = q_vector->tx.ring;
921	q_vector->tx.ring = a->tx_ring[t_idx];
922	q_vector->tx.count++;
923}
924
925/**
926 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
927 * @adapter: board private structure to initialize
928 *
929 * This function maps descriptor rings to the queue-specific vectors
930 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
931 * one vector per ring/queue, but on a constrained vector budget, we
932 * group the rings as "efficiently" as possible.  You would add new
933 * mapping configurations in here.
934 **/
935static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
936{
937	int q_vectors;
938	int v_start = 0;
939	int rxr_idx = 0, txr_idx = 0;
940	int rxr_remaining = adapter->num_rx_queues;
941	int txr_remaining = adapter->num_tx_queues;
942	int i, j;
943	int rqpv, tqpv;
944	int err = 0;
945
946	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
947
948	/*
949	 * The ideal configuration...
950	 * We have enough vectors to map one per queue.
951	 */
952	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
953		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
954			map_vector_to_rxq(adapter, v_start, rxr_idx);
955
956		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
957			map_vector_to_txq(adapter, v_start, txr_idx);
958		goto out;
959	}
960
961	/*
962	 * If we don't have enough vectors for a 1-to-1
963	 * mapping, we'll have to group them so there are
964	 * multiple queues per vector.
965	 */
966	/* Re-adjusting *qpv takes care of the remainder. */
967	for (i = v_start; i < q_vectors; i++) {
968		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
969		for (j = 0; j < rqpv; j++) {
970			map_vector_to_rxq(adapter, i, rxr_idx);
971			rxr_idx++;
972			rxr_remaining--;
973		}
974	}
975	for (i = v_start; i < q_vectors; i++) {
976		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
977		for (j = 0; j < tqpv; j++) {
978			map_vector_to_txq(adapter, i, txr_idx);
979			txr_idx++;
980			txr_remaining--;
981		}
982	}
983
984out:
985	return err;
986}
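/* Example of the non-1:1 grouping above: with 2 q_vectors and 3 Rx
 * queues, the first pass assigns DIV_ROUND_UP(3, 2) = 2 rings (rxr_idx
 * 0 and 1) to vector 0 and the second pass assigns the remaining
 * DIV_ROUND_UP(1, 1) = 1 ring to vector 1; Tx rings are spread the same
 * way in the following loop.
 */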
987
988/**
989 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
990 * @adapter: board private structure
991 *
992 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
993 * interrupts from the kernel.
994 **/
995static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
996{
997	struct net_device *netdev = adapter->netdev;
998	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
999	int vector, err;
1000	int ri = 0, ti = 0;
1001
1002	for (vector = 0; vector < q_vectors; vector++) {
1003		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1004		struct msix_entry *entry = &adapter->msix_entries[vector];
1005
1006		if (q_vector->tx.ring && q_vector->rx.ring) {
1007			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1008				 "%s-%s-%d", netdev->name, "TxRx", ri++);
1009			ti++;
1010		} else if (q_vector->rx.ring) {
1011			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1012				 "%s-%s-%d", netdev->name, "rx", ri++);
1013		} else if (q_vector->tx.ring) {
1014			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1015				 "%s-%s-%d", netdev->name, "tx", ti++);
1016		} else {
1017			/* skip this unused q_vector */
1018			continue;
1019		}
1020		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1021				  q_vector->name, q_vector);
1022		if (err) {
1023			hw_dbg(&adapter->hw,
1024			       "request_irq failed for MSIX interrupt "
1025			       "Error: %d\n", err);
1026			goto free_queue_irqs;
1027		}
1028	}
1029
1030	err = request_irq(adapter->msix_entries[vector].vector,
1031			  &ixgbevf_msix_other, 0, netdev->name, adapter);
1032	if (err) {
1033		hw_dbg(&adapter->hw,
1034		       "request_irq for msix_other failed: %d\n", err);
1035		goto free_queue_irqs;
1036	}
1037
1038	return 0;
1039
1040free_queue_irqs:
1041	while (vector) {
1042		vector--;
1043		free_irq(adapter->msix_entries[vector].vector,
1044			 adapter->q_vector[vector]);
1045	}
1046	/* This failure is non-recoverable - it indicates the system is
1047	 * out of MSIX vector resources and the VF driver cannot run
1048	 * without them.  Set the number of msix vectors to zero
1049	 * indicating that not enough can be allocated.  The error
1050	 * will be returned to the user indicating device open failed.
1051	 * Any further attempts to force the driver to open will also
1052	 * fail.  The only way to recover is to unload the driver and
1053	 * reload it again.  If the system has recovered some MSIX
1054	 * vectors then it may succeed.
1055	 */
1056	adapter->num_msix_vectors = 0;
1057	return err;
1058}
1059
1060static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1061{
1062	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1063
1064	for (i = 0; i < q_vectors; i++) {
1065		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1066		q_vector->rx.ring = NULL;
1067		q_vector->tx.ring = NULL;
1068		q_vector->rx.count = 0;
1069		q_vector->tx.count = 0;
1070	}
1071}
1072
1073/**
1074 * ixgbevf_request_irq - initialize interrupts
1075 * @adapter: board private structure
1076 *
1077 * Attempts to configure interrupts using the best available
1078 * capabilities of the hardware and kernel.
1079 **/
1080static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1081{
1082	int err = 0;
1083
1084	err = ixgbevf_request_msix_irqs(adapter);
1085
1086	if (err)
1087		hw_dbg(&adapter->hw,
1088		       "request_irq failed, Error %d\n", err);
1089
1090	return err;
1091}
1092
1093static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1094{
1095	int i, q_vectors;
1096
1097	q_vectors = adapter->num_msix_vectors;
1098	i = q_vectors - 1;
1099
1100	free_irq(adapter->msix_entries[i].vector, adapter);
1101	i--;
1102
1103	for (; i >= 0; i--) {
1104		/* free only the irqs that were actually requested */
1105		if (!adapter->q_vector[i]->rx.ring &&
1106		    !adapter->q_vector[i]->tx.ring)
1107			continue;
1108
1109		free_irq(adapter->msix_entries[i].vector,
1110			 adapter->q_vector[i]);
1111	}
1112
1113	ixgbevf_reset_q_vectors(adapter);
1114}
1115
1116/**
1117 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1118 * @adapter: board private structure
1119 **/
1120static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1121{
1122	struct ixgbe_hw *hw = &adapter->hw;
1123	int i;
1124
1125	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1126	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1127	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1128
1129	IXGBE_WRITE_FLUSH(hw);
1130
1131	for (i = 0; i < adapter->num_msix_vectors; i++)
1132		synchronize_irq(adapter->msix_entries[i].vector);
1133}
1134
1135/**
1136 * ixgbevf_irq_enable - Enable default interrupt generation settings
1137 * @adapter: board private structure
1138 **/
1139static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1140{
1141	struct ixgbe_hw *hw = &adapter->hw;
1142
1143	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1144	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1145	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1146}
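/* VTEIMS unmasks the vectors in eims_enable_mask, while VTEIAM and
 * VTEIAC arm auto-mask and auto-clear for those same vectors; the
 * counterpart writes in ixgbevf_irq_disable() above mask everything
 * via VTEIMC and turn both features off.
 */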
1147
1148/**
1149 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1150 * @adapter: board private structure
1151 * @ring: structure containing ring specific data
1152 *
1153 * Configure the Tx descriptor ring after a reset.
1154 **/
1155static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1156				      struct ixgbevf_ring *ring)
1157{
1158	struct ixgbe_hw *hw = &adapter->hw;
1159	u64 tdba = ring->dma;
1160	int wait_loop = 10;
1161	u32 txdctl = IXGBE_TXDCTL_ENABLE;
1162	u8 reg_idx = ring->reg_idx;
1163
1164	/* disable queue to avoid issues while updating state */
1165	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1166	IXGBE_WRITE_FLUSH(hw);
1167
1168	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1169	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1170	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1171			ring->count * sizeof(union ixgbe_adv_tx_desc));
1172
1173	/* disable head writeback */
1174	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1175	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1176
1177	/* enable relaxed ordering */
1178	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1179			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1180			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1181
1182	/* reset head and tail pointers */
1183	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1184	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1185	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1186
1187	/* reset ntu and ntc to place SW in sync with hardware */
1188	ring->next_to_clean = 0;
1189	ring->next_to_use = 0;
1190
1191	/* In order to avoid issues, WTHRESH + PTHRESH should always be equal
1192	 * to or less than the number of on-chip descriptors, which is
1193	 * currently 40.
1194	 */
1195	txdctl |= (8 << 16);    /* WTHRESH = 8 */
1196
1197	/* Setting PTHRESH to 32 improves performance */
1198	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
1199		  32;          /* PTHRESH = 32 */
1200
1201	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1202
1203	/* poll to verify queue is enabled */
1204	do {
1205		usleep_range(1000, 2000);
1206		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1207	}  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1208	if (!wait_loop)
1209		pr_err("Could not enable Tx Queue %d\n", reg_idx);
1210}
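/* From the shifts above, TXDCTL packs PTHRESH into the low bits,
 * HTHRESH starting at bit 8 and WTHRESH starting at bit 16, so the
 * value written here is PTHRESH = 32, HTHRESH = 1, WTHRESH = 8,
 * keeping WTHRESH + PTHRESH at the 40-descriptor limit noted above.
 */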
1211
1212/**
1213 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1214 * @adapter: board private structure
1215 *
1216 * Configure the Tx unit of the MAC after a reset.
1217 **/
1218static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1219{
1220	u32 i;
1221
1222	/* Setup the HW Tx Head and Tail descriptor pointers */
1223	for (i = 0; i < adapter->num_tx_queues; i++)
1224		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1225}
1226
1227#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
1228
1229static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1230{
1231	struct ixgbevf_ring *rx_ring;
1232	struct ixgbe_hw *hw = &adapter->hw;
1233	u32 srrctl;
1234
1235	rx_ring = adapter->rx_ring[index];
1236
1237	srrctl = IXGBE_SRRCTL_DROP_EN;
1238
1239	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1240
1241	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1242		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1243
1244	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1245}
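/* The SRRCTL buffer size field is expressed in 1 KB units: assuming
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, a 2048-byte rx_buf_len is rounded
 * by ALIGN() and encoded as 2.  DROP_EN lets the hardware drop frames
 * when no descriptors are available instead of backing up the queue.
 */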
1246
1247static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1248{
1249	struct ixgbe_hw *hw = &adapter->hw;
1250
1251	/* PSRTYPE must be initialized in 82599 */
1252	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1253		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1254		      IXGBE_PSRTYPE_L2HDR;
1255
1256	if (adapter->num_rx_queues > 1)
1257		psrtype |= 1 << 29;
1258
1259	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1260}
1261
1262static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1263{
1264	struct ixgbe_hw *hw = &adapter->hw;
1265	struct net_device *netdev = adapter->netdev;
1266	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1267	int i;
1268	u16 rx_buf_len;
1269
1270	/* notify the PF of our intent to use this size of frame */
1271	ixgbevf_rlpml_set_vf(hw, max_frame);
1272
1273	/* PF will allow an extra 4 bytes past this size for VLAN tagged frames */
1274	max_frame += VLAN_HLEN;
1275
1276	/*
1277	 * Allocate buffer sizes that fit well into 32K and
1278	 * take into account max frame size of 9.5K
1279	 */
1280	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1281	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1282		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1283	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1284		rx_buf_len = IXGBEVF_RXBUFFER_2K;
1285	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1286		rx_buf_len = IXGBEVF_RXBUFFER_4K;
1287	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1288		rx_buf_len = IXGBEVF_RXBUFFER_8K;
1289	else
1290		rx_buf_len = IXGBEVF_RXBUFFER_10K;
1291
1292	for (i = 0; i < adapter->num_rx_queues; i++)
1293		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
1294}
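/* For the default 1500-byte MTU this gives max_frame = 1518 (MTU plus
 * the 14-byte Ethernet header and 4-byte FCS), which is the size
 * reported to the PF; adding VLAN_HLEN makes it 1522 which, assuming
 * MAXIMUM_ETHERNET_VLAN_SIZE is the usual 1522, still takes the
 * smallest buffer on an X540 VF, while an 82599 VF falls through to
 * the 2K size.
 */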
1295
1296#define IXGBEVF_MAX_RX_DESC_POLL 10
1297static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1298				     struct ixgbevf_ring *ring)
1299{
1300	struct ixgbe_hw *hw = &adapter->hw;
1301	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1302	u32 rxdctl;
1303	u8 reg_idx = ring->reg_idx;
1304
1305	if (IXGBE_REMOVED(hw->hw_addr))
1306		return;
1307	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1308	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1309
1310	/* write value back with RXDCTL.ENABLE bit cleared */
1311	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1312
1313	/* the hardware may take up to 100us to really disable the rx queue */
1314	do {
1315		udelay(10);
1316		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1317	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1318
1319	if (!wait_loop)
1320		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1321		       reg_idx);
1322}
1323
1324static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1325					 struct ixgbevf_ring *ring)
1326{
1327	struct ixgbe_hw *hw = &adapter->hw;
1328	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1329	u32 rxdctl;
1330	u8 reg_idx = ring->reg_idx;
1331
1332	if (IXGBE_REMOVED(hw->hw_addr))
1333		return;
1334	do {
1335		usleep_range(1000, 2000);
1336		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1337	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1338
1339	if (!wait_loop)
1340		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1341		       reg_idx);
1342}
1343
1344static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1345				      struct ixgbevf_ring *ring)
1346{
1347	struct ixgbe_hw *hw = &adapter->hw;
1348	u64 rdba = ring->dma;
1349	u32 rxdctl;
1350	u8 reg_idx = ring->reg_idx;
1351
1352	/* disable queue to avoid issues while updating state */
1353	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1354	ixgbevf_disable_rx_queue(adapter, ring);
1355
1356	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1357	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1358	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1359			ring->count * sizeof(union ixgbe_adv_rx_desc));
1360
1361	/* enable relaxed ordering */
1362	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1363			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1364
1365	/* reset head and tail pointers */
1366	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1367	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1368	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1369
1370	/* reset ntu and ntc to place SW in sync with hardware */
1371	ring->next_to_clean = 0;
1372	ring->next_to_use = 0;
1373
1374	ixgbevf_configure_srrctl(adapter, reg_idx);
1375
1376	/* prevent DMA from exceeding buffer space available */
1377	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1378	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
1379	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1380	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1381
1382	ixgbevf_rx_desc_queue_enable(adapter, ring);
1383	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1384}
1385
1386/**
1387 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1388 * @adapter: board private structure
1389 *
1390 * Configure the Rx unit of the MAC after a reset.
1391 **/
1392static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1393{
1394	int i;
1395
1396	ixgbevf_setup_psrtype(adapter);
1397
1398	/* set_rx_buffer_len must be called before ring initialization */
1399	ixgbevf_set_rx_buffer_len(adapter);
1400
1401	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1402	 * the Base and Length of the Rx Descriptor Ring */
1403	for (i = 0; i < adapter->num_rx_queues; i++)
1404		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1405}
1406
1407static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1408				   __be16 proto, u16 vid)
1409{
1410	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1411	struct ixgbe_hw *hw = &adapter->hw;
1412	int err;
1413
1414	spin_lock_bh(&adapter->mbx_lock);
1415
1416	/* add VID to filter table */
1417	err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1418
1419	spin_unlock_bh(&adapter->mbx_lock);
1420
1421	/* translate error return types so error makes sense */
1422	if (err == IXGBE_ERR_MBX)
1423		return -EIO;
1424
1425	if (err == IXGBE_ERR_INVALID_ARGUMENT)
1426		return -EACCES;
1427
1428	set_bit(vid, adapter->active_vlans);
1429
1430	return err;
1431}
1432
1433static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1434				    __be16 proto, u16 vid)
1435{
1436	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1437	struct ixgbe_hw *hw = &adapter->hw;
1438	int err = -EOPNOTSUPP;
1439
1440	spin_lock_bh(&adapter->mbx_lock);
1441
1442	/* remove VID from filter table */
1443	err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1444
1445	spin_unlock_bh(&adapter->mbx_lock);
1446
1447	clear_bit(vid, adapter->active_vlans);
1448
1449	return err;
1450}
1451
1452static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1453{
1454	u16 vid;
1455
1456	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1457		ixgbevf_vlan_rx_add_vid(adapter->netdev,
1458					htons(ETH_P_8021Q), vid);
1459}
1460
1461static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1462{
1463	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1464	struct ixgbe_hw *hw = &adapter->hw;
1465	int count = 0;
1466
1467	if ((netdev_uc_count(netdev)) > 10) {
1468		pr_err("Too many unicast filters - No Space\n");
1469		return -ENOSPC;
1470	}
1471
1472	if (!netdev_uc_empty(netdev)) {
1473		struct netdev_hw_addr *ha;
1474		netdev_for_each_uc_addr(ha, netdev) {
1475			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1476			udelay(200);
1477		}
1478	} else {
1479		/*
1480		 * If the list is empty then send message to PF driver to
1481		 * clear all macvlans on this VF.
1482		 */
1483		hw->mac.ops.set_uc_addr(hw, 0, NULL);
1484	}
1485
1486	return count;
1487}
1488
1489/**
1490 * ixgbevf_set_rx_mode - Multicast and unicast set
1491 * @netdev: network interface device structure
1492 *
1493 * The set_rx_mode entry point is called whenever the multicast address
1494 * list, unicast address list or the network interface flags are updated.
1495 * This routine is responsible for configuring the hardware for proper
1496 * multicast mode and configuring requested unicast filters.
1497 **/
1498static void ixgbevf_set_rx_mode(struct net_device *netdev)
1499{
1500	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1501	struct ixgbe_hw *hw = &adapter->hw;
1502
1503	spin_lock_bh(&adapter->mbx_lock);
1504
1505	/* reprogram multicast list */
1506	hw->mac.ops.update_mc_addr_list(hw, netdev);
1507
1508	ixgbevf_write_uc_addr_list(netdev);
1509
1510	spin_unlock_bh(&adapter->mbx_lock);
1511}
1512
1513static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1514{
1515	int q_idx;
1516	struct ixgbevf_q_vector *q_vector;
1517	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1518
1519	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1520		q_vector = adapter->q_vector[q_idx];
1521#ifdef CONFIG_NET_RX_BUSY_POLL
1522		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1523#endif
1524		napi_enable(&q_vector->napi);
1525	}
1526}
1527
1528static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1529{
1530	int q_idx;
1531	struct ixgbevf_q_vector *q_vector;
1532	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1533
1534	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1535		q_vector = adapter->q_vector[q_idx];
1536		napi_disable(&q_vector->napi);
1537#ifdef CONFIG_NET_RX_BUSY_POLL
1538		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1539			pr_info("QV %d locked\n", q_idx);
1540			usleep_range(1000, 20000);
1541		}
1542#endif /* CONFIG_NET_RX_BUSY_POLL */
1543	}
1544}
1545
1546static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1547{
1548	struct ixgbe_hw *hw = &adapter->hw;
1549	unsigned int def_q = 0;
1550	unsigned int num_tcs = 0;
1551	unsigned int num_rx_queues = 1;
1552	int err;
1553
1554	spin_lock_bh(&adapter->mbx_lock);
1555
1556	/* fetch queue configuration from the PF */
1557	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1558
1559	spin_unlock_bh(&adapter->mbx_lock);
1560
1561	if (err)
1562		return err;
1563
1564	if (num_tcs > 1) {
1565		/* update default Tx ring register index */
1566		adapter->tx_ring[0]->reg_idx = def_q;
1567
1568		/* we need as many queues as traffic classes */
1569		num_rx_queues = num_tcs;
1570	}
1571
1572	/* if we have a bad config, request a queue reset */
1573	if (adapter->num_rx_queues != num_rx_queues) {
1574		/* force mailbox timeout to prevent further messages */
1575		hw->mbx.timeout = 0;
1576
1577		/* wait for watchdog to come around and bail us out */
1578		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1579	}
1580
1581	return 0;
1582}
1583
1584static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1585{
1586	ixgbevf_configure_dcb(adapter);
1587
1588	ixgbevf_set_rx_mode(adapter->netdev);
1589
1590	ixgbevf_restore_vlan(adapter);
1591
1592	ixgbevf_configure_tx(adapter);
1593	ixgbevf_configure_rx(adapter);
1594}
1595
1596static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1597{
1598	/* Only save pre-reset stats if there are some */
1599	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1600		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1601			adapter->stats.base_vfgprc;
1602		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1603			adapter->stats.base_vfgptc;
1604		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1605			adapter->stats.base_vfgorc;
1606		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1607			adapter->stats.base_vfgotc;
1608		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1609			adapter->stats.base_vfmprc;
1610	}
1611}
1612
1613static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1614{
1615	struct ixgbe_hw *hw = &adapter->hw;
1616
1617	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1618	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1619	adapter->stats.last_vfgorc |=
1620		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1621	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1622	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1623	adapter->stats.last_vfgotc |=
1624		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1625	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1626
1627	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1628	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1629	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1630	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1631	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1632}
1633
1634static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1635{
1636	struct ixgbe_hw *hw = &adapter->hw;
1637	int api[] = { ixgbe_mbox_api_11,
1638		      ixgbe_mbox_api_10,
1639		      ixgbe_mbox_api_unknown };
1640	int err = 0, idx = 0;
1641
1642	spin_lock_bh(&adapter->mbx_lock);
1643
1644	while (api[idx] != ixgbe_mbox_api_unknown) {
1645		err = ixgbevf_negotiate_api_version(hw, api[idx]);
1646		if (!err)
1647			break;
1648		idx++;
1649	}
1650
1651	spin_unlock_bh(&adapter->mbx_lock);
1652}
1653
1654static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1655{
1656	struct net_device *netdev = adapter->netdev;
1657	struct ixgbe_hw *hw = &adapter->hw;
1658
1659	ixgbevf_configure_msix(adapter);
1660
1661	spin_lock_bh(&adapter->mbx_lock);
1662
1663	if (is_valid_ether_addr(hw->mac.addr))
1664		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1665	else
1666		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1667
1668	spin_unlock_bh(&adapter->mbx_lock);
1669
1670	smp_mb__before_clear_bit();
1671	clear_bit(__IXGBEVF_DOWN, &adapter->state);
1672	ixgbevf_napi_enable_all(adapter);
1673
1674	/* enable transmits */
1675	netif_tx_start_all_queues(netdev);
1676
1677	ixgbevf_save_reset_stats(adapter);
1678	ixgbevf_init_last_counter_stats(adapter);
1679
1680	hw->mac.get_link_status = 1;
1681	mod_timer(&adapter->watchdog_timer, jiffies);
1682}
1683
1684void ixgbevf_up(struct ixgbevf_adapter *adapter)
1685{
1686	struct ixgbe_hw *hw = &adapter->hw;
1687
1688	ixgbevf_configure(adapter);
1689
1690	ixgbevf_up_complete(adapter);
1691
1692	/* clear any pending interrupts, may auto mask */
1693	IXGBE_READ_REG(hw, IXGBE_VTEICR);
1694
1695	ixgbevf_irq_enable(adapter);
1696}
1697
1698/**
1699 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1700 * @rx_ring: ring to free buffers from
1701 **/
1702static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
1703{
1704	unsigned long size;
1705	unsigned int i;
1706
1707	if (!rx_ring->rx_buffer_info)
1708		return;
1709
1710	/* Free all the Rx ring sk_buffs */
1711	for (i = 0; i < rx_ring->count; i++) {
1712		struct ixgbevf_rx_buffer *rx_buffer_info;
1713
1714		rx_buffer_info = &rx_ring->rx_buffer_info[i];
1715		if (rx_buffer_info->dma) {
1716			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
1717					 rx_ring->rx_buf_len,
1718					 DMA_FROM_DEVICE);
1719			rx_buffer_info->dma = 0;
1720		}
1721		if (rx_buffer_info->skb) {
1722			struct sk_buff *skb = rx_buffer_info->skb;
1723			rx_buffer_info->skb = NULL;
1724			do {
1725				struct sk_buff *this = skb;
1726				skb = IXGBE_CB(skb)->prev;
1727				dev_kfree_skb(this);
1728			} while (skb);
1729		}
1730	}
1731
1732	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1733	memset(rx_ring->rx_buffer_info, 0, size);
1734
1735	/* Zero out the descriptor ring */
1736	memset(rx_ring->desc, 0, rx_ring->size);
1737}
1738
1739/**
1740 * ixgbevf_clean_tx_ring - Free Tx Buffers
1741 * @tx_ring: ring to be cleaned
1742 **/
1743static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
1744{
1745	struct ixgbevf_tx_buffer *tx_buffer_info;
1746	unsigned long size;
1747	unsigned int i;
1748
1749	if (!tx_ring->tx_buffer_info)
1750		return;
1751
1752	/* Free all the Tx ring sk_buffs */
1753	for (i = 0; i < tx_ring->count; i++) {
1754		tx_buffer_info = &tx_ring->tx_buffer_info[i];
1755		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1756	}
1757
1758	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1759	memset(tx_ring->tx_buffer_info, 0, size);
1760
1761	memset(tx_ring->desc, 0, tx_ring->size);
1762}
1763
1764/**
1765 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1766 * @adapter: board private structure
1767 **/
1768static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1769{
1770	int i;
1771
1772	for (i = 0; i < adapter->num_rx_queues; i++)
1773		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
1774}
1775
1776/**
1777 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1778 * @adapter: board private structure
1779 **/
1780static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1781{
1782	int i;
1783
1784	for (i = 0; i < adapter->num_tx_queues; i++)
1785		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
1786}
1787
1788void ixgbevf_down(struct ixgbevf_adapter *adapter)
1789{
1790	struct net_device *netdev = adapter->netdev;
1791	struct ixgbe_hw *hw = &adapter->hw;
1792	int i;
1793
1794	/* signal that we are down to the interrupt handler */
1795	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
1796		return; /* do nothing if already down */
1797
1798	/* disable all enabled rx queues */
1799	for (i = 0; i < adapter->num_rx_queues; i++)
1800		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
1801
1802	netif_tx_disable(netdev);
1803
1804	msleep(10);
1805
1806	netif_tx_stop_all_queues(netdev);
1807
1808	ixgbevf_irq_disable(adapter);
1809
1810	ixgbevf_napi_disable_all(adapter);
1811
1812	del_timer_sync(&adapter->watchdog_timer);
1813	/* can't call flush scheduled work here because it can deadlock
1814	 * if linkwatch_event tries to acquire the rtnl_lock which we are
1815	 * holding */
1816	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1817		msleep(1);
1818
1819	/* disable transmits in the hardware now that interrupts are off */
1820	for (i = 0; i < adapter->num_tx_queues; i++) {
1821		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
1822
1823		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
1824				IXGBE_TXDCTL_SWFLSH);
1825	}
1826
1827	netif_carrier_off(netdev);
1828
1829	if (!pci_channel_offline(adapter->pdev))
1830		ixgbevf_reset(adapter);
1831
1832	ixgbevf_clean_all_tx_rings(adapter);
1833	ixgbevf_clean_all_rx_rings(adapter);
1834}
1835
1836void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1837{
1838	WARN_ON(in_interrupt());
1839
1840	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1841		msleep(1);
1842
1843	ixgbevf_down(adapter);
1844	ixgbevf_up(adapter);
1845
1846	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1847}
1848
1849void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1850{
1851	struct ixgbe_hw *hw = &adapter->hw;
1852	struct net_device *netdev = adapter->netdev;
1853
1854	if (hw->mac.ops.reset_hw(hw)) {
1855		hw_dbg(hw, "PF still resetting\n");
1856	} else {
1857		hw->mac.ops.init_hw(hw);
1858		ixgbevf_negotiate_api(adapter);
1859	}
1860
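	/* the PF supplies the VF MAC address via the mailbox as part of the
	 * reset handshake; copy it to the netdev if a valid one was given
	 */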
1861	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1862		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1863		       netdev->addr_len);
1864		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1865		       netdev->addr_len);
1866	}
1867}
1868
1869static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1870					int vectors)
1871{
1872	int vector_threshold;
1873
1874	/* We'll want at least 2 (vector_threshold):
1875	 * 1) TxQ[0] + RxQ[0] handler
1876	 * 2) Other (Link Status Change, etc.)
1877	 */
1878	vector_threshold = MIN_MSIX_COUNT;
1879
1880	/* The more we get, the more we will assign to Tx/Rx Cleanup
1881	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1882	 * Right now, we simply care about how many we'll get; we'll
1883	 * set them up later while requesting irq's.
1884	 */
1885	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1886					vector_threshold, vectors);
1887
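	/* pci_enable_msix_range() returns the number of vectors actually
	 * granted, or a negative errno if not even vector_threshold
	 * vectors could be allocated
	 */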
1888	if (vectors < 0) {
1889		dev_err(&adapter->pdev->dev,
1890			"Unable to allocate MSI-X interrupts\n");
1891		kfree(adapter->msix_entries);
1892		adapter->msix_entries = NULL;
1893		return vectors;
1894	}
1895
1896	/* Adjust for only the vectors we'll use, which is minimum
1897	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1898	 * vectors we were allocated.
1899	 */
1900	adapter->num_msix_vectors = vectors;
1901
1902	return 0;
1903}
1904
1905/**
1906 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1907 * @adapter: board private structure to initialize
1908 *
1909 * This is the top level queue allocation routine.  The order here is very
1910 * important, starting with the "most" number of features turned on at once,
1911 * and ending with the smallest set of features.  This way large combinations
1912 * can be allocated if they're turned on, and smaller combinations are the
1913 * fallthrough conditions.
1914 *
1915 **/
1916static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1917{
1918	struct ixgbe_hw *hw = &adapter->hw;
1919	unsigned int def_q = 0;
1920	unsigned int num_tcs = 0;
1921	int err;
1922
1923	/* Start with base case */
1924	adapter->num_rx_queues = 1;
1925	adapter->num_tx_queues = 1;
1926
1927	spin_lock_bh(&adapter->mbx_lock);
1928
1929	/* fetch queue configuration from the PF */
1930	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1931
1932	spin_unlock_bh(&adapter->mbx_lock);
1933
1934	if (err)
1935		return;
1936
1937	/* we need as many queues as traffic classes */
1938	if (num_tcs > 1)
1939		adapter->num_rx_queues = num_tcs;
1940}
1941
1942/**
1943 * ixgbevf_alloc_queues - Allocate memory for all rings
1944 * @adapter: board private structure to initialize
1945 *
1946 * We allocate one ring per queue at run-time since we don't know the
1947 * number of queues at compile-time.  One Tx ring and one Rx ring are
1948 * allocated for every queue that is in use.
1949 **/
1950static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1951{
1952	struct ixgbevf_ring *ring;
1953	int rx = 0, tx = 0;
1954
1955	for (; tx < adapter->num_tx_queues; tx++) {
1956		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1957		if (!ring)
1958			goto err_allocation;
1959
1960		ring->dev = &adapter->pdev->dev;
1961		ring->netdev = adapter->netdev;
1962		ring->count = adapter->tx_ring_count;
1963		ring->queue_index = tx;
1964		ring->reg_idx = tx;
1965
1966		adapter->tx_ring[tx] = ring;
1967	}
1968
1969	for (; rx < adapter->num_rx_queues; rx++) {
1970		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1971		if (!ring)
1972			goto err_allocation;
1973
1974		ring->dev = &adapter->pdev->dev;
1975		ring->netdev = adapter->netdev;
1976
1977		ring->count = adapter->rx_ring_count;
1978		ring->queue_index = rx;
1979		ring->reg_idx = rx;
1980
1981		adapter->rx_ring[rx] = ring;
1982	}
1983
1984	return 0;
1985
1986err_allocation:
1987	while (tx) {
1988		kfree(adapter->tx_ring[--tx]);
1989		adapter->tx_ring[tx] = NULL;
1990	}
1991
1992	while (rx) {
1993		kfree(adapter->rx_ring[--rx]);
1994		adapter->rx_ring[rx] = NULL;
1995	}
1996	return -ENOMEM;
1997}
1998
1999/**
2000 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2001 * @adapter: board private structure to initialize
2002 *
2003 * Attempt to configure the interrupts using the best available
2004 * capabilities of the hardware and the kernel.
2005 **/
2006static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2007{
2008	struct net_device *netdev = adapter->netdev;
2009	int err = 0;
2010	int vector, v_budget;
2011
2012	/*
2013	 * It's easy to be greedy for MSI-X vectors, but it really
2014	 * doesn't do us much good if we have a lot more vectors
2015	 * than CPUs.  So let's be conservative and only ask for
2016	 * (roughly) the same number of vectors as there are CPUs.
2017	 * The default is to use pairs of vectors.
2018	 */
2019	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2020	v_budget = min_t(int, v_budget, num_online_cpus());
2021	v_budget += NON_Q_VECTORS;
2022
2023	/* MSI-X is the only interrupt mode this VF driver supports, so a
2024	 * failure to allocate the MSI-X entry table is fatal. */
2025	adapter->msix_entries = kcalloc(v_budget,
2026					sizeof(struct msix_entry), GFP_KERNEL);
2027	if (!adapter->msix_entries) {
2028		err = -ENOMEM;
2029		goto out;
2030	}
2031
2032	for (vector = 0; vector < v_budget; vector++)
2033		adapter->msix_entries[vector].entry = vector;
2034
2035	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2036	if (err)
2037		goto out;
2038
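	/* tell the stack how many Tx/Rx queues are actually usable */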
2039	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2040	if (err)
2041		goto out;
2042
2043	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2044
2045out:
2046	return err;
2047}
2048
2049/**
2050 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2051 * @adapter: board private structure to initialize
2052 *
2053 * We allocate one q_vector per queue interrupt.  If allocation fails we
2054 * return -ENOMEM.
2055 **/
2056static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2057{
2058	int q_idx, num_q_vectors;
2059	struct ixgbevf_q_vector *q_vector;
2060
2061	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2062
2063	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2064		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2065		if (!q_vector)
2066			goto err_out;
2067		q_vector->adapter = adapter;
2068		q_vector->v_idx = q_idx;
2069		netif_napi_add(adapter->netdev, &q_vector->napi,
2070			       ixgbevf_poll, 64);
2071#ifdef CONFIG_NET_RX_BUSY_POLL
2072		napi_hash_add(&q_vector->napi);
2073#endif
2074		adapter->q_vector[q_idx] = q_vector;
2075	}
2076
2077	return 0;
2078
2079err_out:
2080	while (q_idx) {
2081		q_idx--;
2082		q_vector = adapter->q_vector[q_idx];
2083#ifdef CONFIG_NET_RX_BUSY_POLL
2084		napi_hash_del(&q_vector->napi);
2085#endif
2086		netif_napi_del(&q_vector->napi);
2087		kfree(q_vector);
2088		adapter->q_vector[q_idx] = NULL;
2089	}
2090	return -ENOMEM;
2091}
2092
2093/**
2094 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2095 * @adapter: board private structure to initialize
2096 *
2097 * This function frees the memory allocated to the q_vectors.  In addition if
2098 * NAPI is enabled it will delete any references to the NAPI struct prior
2099 * to freeing the q_vector.
2100 **/
2101static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2102{
2103	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2104
2105	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2106		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2107
2108		adapter->q_vector[q_idx] = NULL;
2109#ifdef CONFIG_NET_RX_BUSY_POLL
2110		napi_hash_del(&q_vector->napi);
2111#endif
2112		netif_napi_del(&q_vector->napi);
2113		kfree(q_vector);
2114	}
2115}
2116
2117/**
2118 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2119 * @adapter: board private structure
2120 *
2121 **/
2122static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2123{
2124	pci_disable_msix(adapter->pdev);
2125	kfree(adapter->msix_entries);
2126	adapter->msix_entries = NULL;
2127}
2128
2129/**
2130 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2131 * @adapter: board private structure to initialize
2132 *
2133 **/
2134static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2135{
2136	int err;
2137
2138	/* Number of supported queues */
2139	ixgbevf_set_num_queues(adapter);
2140
2141	err = ixgbevf_set_interrupt_capability(adapter);
2142	if (err) {
2143		hw_dbg(&adapter->hw,
2144		       "Unable to setup interrupt capabilities\n");
2145		goto err_set_interrupt;
2146	}
2147
2148	err = ixgbevf_alloc_q_vectors(adapter);
2149	if (err) {
2150		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2151		       "vectors\n");
2152		goto err_alloc_q_vectors;
2153	}
2154
2155	err = ixgbevf_alloc_queues(adapter);
2156	if (err) {
2157		pr_err("Unable to allocate memory for queues\n");
2158		goto err_alloc_queues;
2159	}
2160
2161	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2162	       "Tx Queue count = %u\n",
2163	       (adapter->num_rx_queues > 1) ? "Enabled" :
2164	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2165
2166	set_bit(__IXGBEVF_DOWN, &adapter->state);
2167
2168	return 0;
2169err_alloc_queues:
2170	ixgbevf_free_q_vectors(adapter);
2171err_alloc_q_vectors:
2172	ixgbevf_reset_interrupt_capability(adapter);
2173err_set_interrupt:
2174	return err;
2175}
2176
2177/**
2178 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2179 * @adapter: board private structure to clear interrupt scheme on
2180 *
2181 * We go through and clear interrupt specific resources and reset the structure
2182 * to pre-load conditions
2183 **/
2184static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2185{
2186	int i;
2187
2188	for (i = 0; i < adapter->num_tx_queues; i++) {
2189		kfree(adapter->tx_ring[i]);
2190		adapter->tx_ring[i] = NULL;
2191	}
2192	for (i = 0; i < adapter->num_rx_queues; i++) {
2193		kfree(adapter->rx_ring[i]);
2194		adapter->rx_ring[i] = NULL;
2195	}
2196
2197	adapter->num_tx_queues = 0;
2198	adapter->num_rx_queues = 0;
2199
2200	ixgbevf_free_q_vectors(adapter);
2201	ixgbevf_reset_interrupt_capability(adapter);
2202}
2203
2204/**
2205 * ixgbevf_sw_init - Initialize general software structures
2206 * (struct ixgbevf_adapter)
2207 * @adapter: board private structure to initialize
2208 *
2209 * ixgbevf_sw_init initializes the Adapter private data structure.
2210 * Fields are initialized based on PCI device information and
2211 * OS network device settings (MTU size).
2212 **/
2213static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2214{
2215	struct ixgbe_hw *hw = &adapter->hw;
2216	struct pci_dev *pdev = adapter->pdev;
2217	struct net_device *netdev = adapter->netdev;
2218	int err;
2219
2220	/* PCI config space info */
2221
2222	hw->vendor_id = pdev->vendor;
2223	hw->device_id = pdev->device;
2224	hw->revision_id = pdev->revision;
2225	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2226	hw->subsystem_device_id = pdev->subsystem_device;
2227
2228	hw->mbx.ops.init_params(hw);
2229
2230	/* assume legacy case in which PF would only give VF 2 queues */
2231	hw->mac.max_tx_queues = 2;
2232	hw->mac.max_rx_queues = 2;
2233
2234	/* lock to protect mailbox accesses */
2235	spin_lock_init(&adapter->mbx_lock);
2236
2237	err = hw->mac.ops.reset_hw(hw);
2238	if (err) {
2239		dev_info(&pdev->dev,
2240			 "PF still in reset state.  Is the PF interface up?\n");
2241	} else {
2242		err = hw->mac.ops.init_hw(hw);
2243		if (err) {
2244			pr_err("init_shared_code failed: %d\n", err);
2245			goto out;
2246		}
2247		ixgbevf_negotiate_api(adapter);
2248		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2249		if (err)
2250			dev_info(&pdev->dev, "Error reading MAC address\n");
2251		else if (is_zero_ether_addr(adapter->hw.mac.addr))
2252			dev_info(&pdev->dev,
2253				 "MAC address not assigned by administrator.\n");
2254		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2255	}
2256
2257	if (!is_valid_ether_addr(netdev->dev_addr)) {
2258		dev_info(&pdev->dev, "Assigning random MAC address\n");
2259		eth_hw_addr_random(netdev);
2260		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2261	}
2262
2263	/* Enable dynamic interrupt throttling rates */
2264	adapter->rx_itr_setting = 1;
2265	adapter->tx_itr_setting = 1;
2266
2267	/* set default ring sizes */
2268	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2269	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2270
2271	set_bit(__IXGBEVF_DOWN, &adapter->state);
2272	return 0;
2273
2274out:
2275	return err;
2276}
2277
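/* The VF statistics registers are only 32 (or 36) bits wide and wrap.
 * These helpers fold each new hardware reading into a 64-bit software
 * counter, detecting a wrap when the current value is below the value
 * read last time.
 */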
2278#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
2279	{							\
2280		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
2281		if (current_counter < last_counter)		\
2282			counter += 0x100000000LL;		\
2283		last_counter = current_counter;			\
2284		counter &= 0xFFFFFFFF00000000LL;		\
2285		counter |= current_counter;			\
2286	}
2287
2288#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2289	{								 \
2290		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
2291		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
2292		u64 current_counter = (current_counter_msb << 32) |      \
2293			current_counter_lsb;                             \
2294		if (current_counter < last_counter)			 \
2295			counter += 0x1000000000LL;			 \
2296		last_counter = current_counter;				 \
2297		counter &= 0xFFFFFFF000000000LL;			 \
2298		counter |= current_counter;				 \
2299	}
2300/**
2301 * ixgbevf_update_stats - Update the board statistics counters.
2302 * @adapter: board private structure
2303 **/
2304void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2305{
2306	struct ixgbe_hw *hw = &adapter->hw;
2307	int i;
2308
2309	if (!adapter->link_up)
2310		return;
2311
2312	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2313				adapter->stats.vfgprc);
2314	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2315				adapter->stats.vfgptc);
2316	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2317				adapter->stats.last_vfgorc,
2318				adapter->stats.vfgorc);
2319	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2320				adapter->stats.last_vfgotc,
2321				adapter->stats.vfgotc);
2322	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2323				adapter->stats.vfmprc);
2324
2325	for (i = 0;  i  < adapter->num_rx_queues;  i++) {
2326		adapter->hw_csum_rx_error +=
2327			adapter->rx_ring[i]->hw_csum_rx_error;
2328		adapter->rx_ring[i]->hw_csum_rx_error = 0;
2329	}
2330}
2331
2332/**
2333 * ixgbevf_watchdog - Timer Call-back
2334 * @data: pointer to adapter cast into an unsigned long
2335 **/
2336static void ixgbevf_watchdog(unsigned long data)
2337{
2338	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2339	struct ixgbe_hw *hw = &adapter->hw;
2340	u32 eics = 0;
2341	int i;
2342
2343	/*
2344	 * Do the watchdog outside of interrupt context due to the lovely
2345	 * delays that some of the newer hardware requires
2346	 */
2347
2348	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2349		goto watchdog_short_circuit;
2350
2351	/* get one bit for every active tx/rx interrupt vector */
2352	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2353		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2354		if (qv->rx.ring || qv->tx.ring)
2355			eics |= 1 << i;
2356	}
2357
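	/* set the cause bits in VTEICS to fire the interrupts of the active
	 * queue vectors so their clean routines get a chance to run
	 */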
2358	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2359
2360watchdog_short_circuit:
2361	schedule_work(&adapter->watchdog_task);
2362}
2363
2364/**
2365 * ixgbevf_tx_timeout - Respond to a Tx Hang
2366 * @netdev: network interface device structure
2367 **/
2368static void ixgbevf_tx_timeout(struct net_device *netdev)
2369{
2370	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2371
2372	/* Do the reset outside of interrupt context */
2373	schedule_work(&adapter->reset_task);
2374}
2375
2376static void ixgbevf_reset_task(struct work_struct *work)
2377{
2378	struct ixgbevf_adapter *adapter;
2379	adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2380
2381	/* If we're already down or resetting, just bail */
2382	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2383	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2384	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
2385		return;
2386
2387	adapter->tx_timeout_count++;
2388
2389	ixgbevf_reinit_locked(adapter);
2390}
2391
2392/**
2393 * ixgbevf_watchdog_task - worker thread to bring link up
2394 * @work: pointer to work_struct containing our data
2395 **/
2396static void ixgbevf_watchdog_task(struct work_struct *work)
2397{
2398	struct ixgbevf_adapter *adapter = container_of(work,
2399						       struct ixgbevf_adapter,
2400						       watchdog_task);
2401	struct net_device *netdev = adapter->netdev;
2402	struct ixgbe_hw *hw = &adapter->hw;
2403	u32 link_speed = adapter->link_speed;
2404	bool link_up = adapter->link_up;
2405	s32 need_reset;
2406
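	/* if register access has been lost (the device was removed), all we
	 * can do is take the interface down
	 */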
2407	if (IXGBE_REMOVED(hw->hw_addr)) {
2408		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2409			rtnl_lock();
2410			ixgbevf_down(adapter);
2411			rtnl_unlock();
2412		}
2413		return;
2414	}
2415	ixgbevf_queue_reset_subtask(adapter);
2416
2417	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2418
2419	/*
2420	 * Always check the link on the watchdog because we have
2421	 * no LSC interrupt
2422	 */
2423	spin_lock_bh(&adapter->mbx_lock);
2424
2425	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2426
2427	spin_unlock_bh(&adapter->mbx_lock);
2428
2429	if (need_reset) {
2430		adapter->link_up = link_up;
2431		adapter->link_speed = link_speed;
2432		netif_carrier_off(netdev);
2433		netif_tx_stop_all_queues(netdev);
2434		schedule_work(&adapter->reset_task);
2435		goto pf_has_reset;
2436	}
2437	adapter->link_up = link_up;
2438	adapter->link_speed = link_speed;
2439
2440	if (link_up) {
2441		if (!netif_carrier_ok(netdev)) {
2442			char *link_speed_string;
2443			switch (link_speed) {
2444			case IXGBE_LINK_SPEED_10GB_FULL:
2445				link_speed_string = "10 Gbps";
2446				break;
2447			case IXGBE_LINK_SPEED_1GB_FULL:
2448				link_speed_string = "1 Gbps";
2449				break;
2450			case IXGBE_LINK_SPEED_100_FULL:
2451				link_speed_string = "100 Mbps";
2452				break;
2453			default:
2454				link_speed_string = "unknown speed";
2455				break;
2456			}
2457			dev_info(&adapter->pdev->dev,
2458				"NIC Link is Up, %s\n", link_speed_string);
2459			netif_carrier_on(netdev);
2460			netif_tx_wake_all_queues(netdev);
2461		}
2462	} else {
2463		adapter->link_up = false;
2464		adapter->link_speed = 0;
2465		if (netif_carrier_ok(netdev)) {
2466			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2467			netif_carrier_off(netdev);
2468			netif_tx_stop_all_queues(netdev);
2469		}
2470	}
2471
2472	ixgbevf_update_stats(adapter);
2473
2474pf_has_reset:
2475	/* Reset the timer */
2476	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2477	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2478		mod_timer(&adapter->watchdog_timer,
2479			  round_jiffies(jiffies + (2 * HZ)));
2480
2481	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2482}
2483
2484/**
2485 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2486 * @tx_ring: Tx descriptor ring for a specific queue
2487 *
2488 * Free all transmit software resources
2489 **/
2490void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2491{
2492	ixgbevf_clean_tx_ring(tx_ring);
2493
2494	vfree(tx_ring->tx_buffer_info);
2495	tx_ring->tx_buffer_info = NULL;
2496
2497	/* if not set, then don't free */
2498	if (!tx_ring->desc)
2499		return;
2500
2501	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2502			  tx_ring->dma);
2503
2504	tx_ring->desc = NULL;
2505}
2506
2507/**
2508 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2509 * @adapter: board private structure
2510 *
2511 * Free all transmit software resources
2512 **/
2513static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2514{
2515	int i;
2516
2517	for (i = 0; i < adapter->num_tx_queues; i++)
2518		if (adapter->tx_ring[i]->desc)
2519			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2520}
2521
2522/**
2523 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2524 * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2525 *
2526 * Return 0 on success, negative on failure
2527 **/
2528int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2529{
2530	int size;
2531
2532	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2533	tx_ring->tx_buffer_info = vzalloc(size);
2534	if (!tx_ring->tx_buffer_info)
2535		goto err;
2536
2537	/* round up to nearest 4K */
2538	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2539	tx_ring->size = ALIGN(tx_ring->size, 4096);
2540
2541	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2542					   &tx_ring->dma, GFP_KERNEL);
2543	if (!tx_ring->desc)
2544		goto err;
2545
2546	return 0;
2547
2548err:
2549	vfree(tx_ring->tx_buffer_info);
2550	tx_ring->tx_buffer_info = NULL;
2551	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2552	       "descriptor ring\n");
2553	return -ENOMEM;
2554}
2555
2556/**
2557 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2558 * @adapter: board private structure
2559 *
2560 * If this function returns with an error, then it's possible one or
2561 * more of the rings is populated (while the rest are not).  It is the
2562 * callers duty to clean those orphaned rings.
2563 *
2564 * Return 0 on success, negative on failure
2565 **/
2566static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2567{
2568	int i, err = 0;
2569
2570	for (i = 0; i < adapter->num_tx_queues; i++) {
2571		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
2572		if (!err)
2573			continue;
2574		hw_dbg(&adapter->hw,
2575		       "Allocation for Tx Queue %u failed\n", i);
2576		break;
2577	}
2578
2579	return err;
2580}
2581
2582/**
2583 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2584 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2585 *
2586 * Returns 0 on success, negative on failure
2587 **/
2588int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
2589{
2590	int size;
2591
2592	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2593	rx_ring->rx_buffer_info = vzalloc(size);
2594	if (!rx_ring->rx_buffer_info)
2595		goto err;
2596
2597	/* Round up to nearest 4K */
2598	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2599	rx_ring->size = ALIGN(rx_ring->size, 4096);
2600
2601	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
2602					   &rx_ring->dma, GFP_KERNEL);
2603
2604	if (!rx_ring->desc)
2605		goto err;
2606
2607	return 0;
2608err:
2609	vfree(rx_ring->rx_buffer_info);
2610	rx_ring->rx_buffer_info = NULL;
2611	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
2612	return -ENOMEM;
2613}
2614
2615/**
2616 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2617 * @adapter: board private structure
2618 *
2619 * If this function returns with an error, then it's possible one or
2620 * more of the rings is populated (while the rest are not).  It is the
2621 * callers duty to clean those orphaned rings.
2622 *
2623 * Return 0 on success, negative on failure
2624 **/
2625static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2626{
2627	int i, err = 0;
2628
2629	for (i = 0; i < adapter->num_rx_queues; i++) {
2630		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
2631		if (!err)
2632			continue;
2633		hw_dbg(&adapter->hw,
2634		       "Allocation for Rx Queue %u failed\n", i);
2635		break;
2636	}
2637	return err;
2638}
2639
2640/**
2641 * ixgbevf_free_rx_resources - Free Rx Resources
2642 * @rx_ring: ring to clean the resources from
2643 *
2644 * Free all receive software resources
2645 **/
2646void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
2647{
2648	ixgbevf_clean_rx_ring(rx_ring);
2649
2650	vfree(rx_ring->rx_buffer_info);
2651	rx_ring->rx_buffer_info = NULL;
2652
2653	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
2654			  rx_ring->dma);
2655
2656	rx_ring->desc = NULL;
2657}
2658
2659/**
2660 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2661 * @adapter: board private structure
2662 *
2663 * Free all receive software resources
2664 **/
2665static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2666{
2667	int i;
2668
2669	for (i = 0; i < adapter->num_rx_queues; i++)
2670		if (adapter->rx_ring[i]->desc)
2671			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
2672}
2673
2674/**
2675 * ixgbevf_open - Called when a network interface is made active
2676 * @netdev: network interface device structure
2677 *
2678 * Returns 0 on success, negative value on failure
2679 *
2680 * The open entry point is called when a network interface is made
2681 * active by the system (IFF_UP).  At this point all resources needed
2682 * for transmit and receive operations are allocated, the interrupt
2683 * handler is registered with the OS, the watchdog timer is started,
2684 * and the stack is notified that the interface is ready.
2685 **/
2686static int ixgbevf_open(struct net_device *netdev)
2687{
2688	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2689	struct ixgbe_hw *hw = &adapter->hw;
2690	int err;
2691
2692	/* A previous failure to open the device because of a lack of
2693	 * available MSIX vector resources may have reset the number
2694	 * of msix vectors variable to zero.  The only way to recover
2695	 * is to unload/reload the driver and hope that the system has
2696	 * been able to recover some MSIX vector resources.
2697	 */
2698	if (!adapter->num_msix_vectors)
2699		return -ENOMEM;
2700
2701	/* disallow open during test */
2702	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2703		return -EBUSY;
2704
2705	if (hw->adapter_stopped) {
2706		ixgbevf_reset(adapter);
2707		/* if adapter is still stopped then PF isn't up and
2708		 * the vf can't start. */
2709		if (hw->adapter_stopped) {
2710			err = IXGBE_ERR_MBX;
2711			pr_err("Unable to start - perhaps the PF Driver isn't "
2712			       "up yet\n");
2713			goto err_setup_reset;
2714		}
2715	}
2716
2717	/* allocate transmit descriptors */
2718	err = ixgbevf_setup_all_tx_resources(adapter);
2719	if (err)
2720		goto err_setup_tx;
2721
2722	/* allocate receive descriptors */
2723	err = ixgbevf_setup_all_rx_resources(adapter);
2724	if (err)
2725		goto err_setup_rx;
2726
2727	ixgbevf_configure(adapter);
2728
2729	/*
2730	 * Map the Tx/Rx rings to the vectors we were allotted.
2731	 * Because request_irq is called in this function, map_rings
2732	 * must be called *before* up_complete.
2733	 */
2734	ixgbevf_map_rings_to_vectors(adapter);
2735
2736	ixgbevf_up_complete(adapter);
2737
2738	/* clear any pending interrupts, may auto mask */
2739	IXGBE_READ_REG(hw, IXGBE_VTEICR);
2740	err = ixgbevf_request_irq(adapter);
2741	if (err)
2742		goto err_req_irq;
2743
2744	ixgbevf_irq_enable(adapter);
2745
2746	return 0;
2747
2748err_req_irq:
2749	ixgbevf_down(adapter);
2750err_setup_rx:
2751	ixgbevf_free_all_rx_resources(adapter);
2752err_setup_tx:
2753	ixgbevf_free_all_tx_resources(adapter);
2754	ixgbevf_reset(adapter);
2755
2756err_setup_reset:
2757
2758	return err;
2759}
2760
2761/**
2762 * ixgbevf_close - Disables a network interface
2763 * @netdev: network interface device structure
2764 *
2765 * Returns 0, this is not allowed to fail
2766 *
2767 * The close entry point is called when an interface is de-activated
2768 * by the OS.  The hardware is still under the drivers control, but
2769 * needs to be disabled.  A global MAC reset is issued to stop the
2770 * hardware, and all transmit and receive resources are freed.
2771 **/
2772static int ixgbevf_close(struct net_device *netdev)
2773{
2774	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2775
2776	ixgbevf_down(adapter);
2777	ixgbevf_free_irq(adapter);
2778
2779	ixgbevf_free_all_tx_resources(adapter);
2780	ixgbevf_free_all_rx_resources(adapter);
2781
2782	return 0;
2783}
2784
2785static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
2786{
2787	struct net_device *dev = adapter->netdev;
2788
2789	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
2790		return;
2791
2792	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
2793
2794	/* if interface is down do nothing */
2795	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2796	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
2797		return;
2798
2799	/* Hardware has to reinitialize queues and interrupts to
2800	 * match packet buffer alignment. Unfortunately, the
2801	 * hardware is not flexible enough to do this dynamically.
2802	 */
2803	if (netif_running(dev))
2804		ixgbevf_close(dev);
2805
2806	ixgbevf_clear_interrupt_scheme(adapter);
2807	ixgbevf_init_interrupt_scheme(adapter);
2808
2809	if (netif_running(dev))
2810		ixgbevf_open(dev);
2811}
2812
2813static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2814				u32 vlan_macip_lens, u32 type_tucmd,
2815				u32 mss_l4len_idx)
2816{
2817	struct ixgbe_adv_tx_context_desc *context_desc;
2818	u16 i = tx_ring->next_to_use;
2819
2820	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2821
2822	i++;
2823	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2824
2825	/* set bits to identify this as an advanced context descriptor */
2826	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2827
2828	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
2829	context_desc->seqnum_seed	= 0;
2830	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
2831	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
2832}
2833
2834static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2835		       struct ixgbevf_tx_buffer *first,
2836		       u8 *hdr_len)
2837{
2838	struct sk_buff *skb = first->skb;
2839	u32 vlan_macip_lens, type_tucmd;
2840	u32 mss_l4len_idx, l4len;
2841
2842	if (skb->ip_summed != CHECKSUM_PARTIAL)
2843		return 0;
2844
2845	if (!skb_is_gso(skb))
2846		return 0;
2847
2848	if (skb_header_cloned(skb)) {
2849		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2850		if (err)
2851			return err;
2852	}
2853
2854	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2855	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2856
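	/* zero the length fields and seed the L4 checksum with the
	 * pseudo-header so the hardware can insert a correct checksum
	 * into each segment it generates
	 */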
2857	if (skb->protocol == htons(ETH_P_IP)) {
2858		struct iphdr *iph = ip_hdr(skb);
2859		iph->tot_len = 0;
2860		iph->check = 0;
2861		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2862							 iph->daddr, 0,
2863							 IPPROTO_TCP,
2864							 0);
2865		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2866		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
2867				   IXGBE_TX_FLAGS_CSUM |
2868				   IXGBE_TX_FLAGS_IPV4;
2869	} else if (skb_is_gso_v6(skb)) {
2870		ipv6_hdr(skb)->payload_len = 0;
2871		tcp_hdr(skb)->check =
2872		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2873				     &ipv6_hdr(skb)->daddr,
2874				     0, IPPROTO_TCP, 0);
2875		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
2876				   IXGBE_TX_FLAGS_CSUM;
2877	}
2878
2879	/* compute header lengths */
2880	l4len = tcp_hdrlen(skb);
2882	*hdr_len = skb_transport_offset(skb) + l4len;
2883
2884	/* update gso size and bytecount with header size */
2885	first->gso_segs = skb_shinfo(skb)->gso_segs;
2886	first->bytecount += (first->gso_segs - 1) * *hdr_len;
2887
2888	/* mss_l4len_id: use 1 as index for TSO */
2889	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2890	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2891	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2892
2893	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2894	vlan_macip_lens = skb_network_header_len(skb);
2895	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2896	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2897
2898	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2899			    type_tucmd, mss_l4len_idx);
2900
2901	return 1;
2902}
2903
2904static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2905			    struct ixgbevf_tx_buffer *first)
2906{
2907	struct sk_buff *skb = first->skb;
2908	u32 vlan_macip_lens = 0;
2909	u32 mss_l4len_idx = 0;
2910	u32 type_tucmd = 0;
2911
2912	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2913		u8 l4_hdr = 0;
2914		switch (skb->protocol) {
2915		case htons(ETH_P_IP):
2916			vlan_macip_lens |= skb_network_header_len(skb);
2917			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2918			l4_hdr = ip_hdr(skb)->protocol;
2919			break;
2920		case htons(ETH_P_IPV6):
2921			vlan_macip_lens |= skb_network_header_len(skb);
2922			l4_hdr = ipv6_hdr(skb)->nexthdr;
2923			break;
2924		default:
2925			if (unlikely(net_ratelimit())) {
2926				dev_warn(tx_ring->dev,
2927				 "partial checksum but proto=%x!\n",
2928				 first->protocol);
2929			}
2930			break;
2931		}
2932
2933		switch (l4_hdr) {
2934		case IPPROTO_TCP:
2935			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2936			mss_l4len_idx = tcp_hdrlen(skb) <<
2937					IXGBE_ADVTXD_L4LEN_SHIFT;
2938			break;
2939		case IPPROTO_SCTP:
2940			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2941			mss_l4len_idx = sizeof(struct sctphdr) <<
2942					IXGBE_ADVTXD_L4LEN_SHIFT;
2943			break;
2944		case IPPROTO_UDP:
2945			mss_l4len_idx = sizeof(struct udphdr) <<
2946					IXGBE_ADVTXD_L4LEN_SHIFT;
2947			break;
2948		default:
2949			if (unlikely(net_ratelimit())) {
2950				dev_warn(tx_ring->dev,
2951				 "partial checksum but l4 proto=%x!\n",
2952				 l4_hdr);
2953			}
2954			break;
2955		}
2956
2957		/* update TX checksum flag */
2958		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
2959	}
2960
2961	/* vlan_macip_lens: MACLEN, VLAN tag */
2962	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2963	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2964
2965	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2966			    type_tucmd, mss_l4len_idx);
2967}
2968
2969static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
2970{
2971	/* set type for advanced descriptor with frame checksum insertion */
2972	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
2973				      IXGBE_ADVTXD_DCMD_IFCS |
2974				      IXGBE_ADVTXD_DCMD_DEXT);
2975
2976	/* set HW vlan bit if vlan is present */
2977	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2978		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
2979
2980	/* set segmentation enable bits for TSO/FSO */
2981	if (tx_flags & IXGBE_TX_FLAGS_TSO)
2982		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
2983
2984	return cmd_type;
2985}
2986
2987static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
2988				     u32 tx_flags, unsigned int paylen)
2989{
2990	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
2991
2992	/* enable L4 checksum for TSO and TX checksum offload */
2993	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2994		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
2995
2996	/* enable IPv4 checksum for TSO */
2997	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2998		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
2999
3000	/* use index 1 context for TSO/FSO/FCOE */
3001	if (tx_flags & IXGBE_TX_FLAGS_TSO)
3002		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
3003
3004	/* Check Context must be set if Tx switch is enabled, which it
3005	 * always is in the case where virtual functions are running
3006	 */
3007	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3008
3009	tx_desc->read.olinfo_status = olinfo_status;
3010}
3011
3012static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3013			   struct ixgbevf_tx_buffer *first,
3014			   const u8 hdr_len)
3015{
3016	dma_addr_t dma;
3017	struct sk_buff *skb = first->skb;
3018	struct ixgbevf_tx_buffer *tx_buffer;
3019	union ixgbe_adv_tx_desc *tx_desc;
3020	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3021	unsigned int data_len = skb->data_len;
3022	unsigned int size = skb_headlen(skb);
3023	unsigned int paylen = skb->len - hdr_len;
3024	u32 tx_flags = first->tx_flags;
3025	__le32 cmd_type;
3026	u16 i = tx_ring->next_to_use;
3027
3028	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3029
3030	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3031	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3032
3033	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3034	if (dma_mapping_error(tx_ring->dev, dma))
3035		goto dma_error;
3036
3037	/* record length, and DMA address */
3038	dma_unmap_len_set(first, len, size);
3039	dma_unmap_addr_set(first, dma, dma);
3040
3041	tx_desc->read.buffer_addr = cpu_to_le64(dma);
3042
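	/* walk the rest of the frame: carve buffers larger than
	 * IXGBE_MAX_DATA_PER_TXD into multiple descriptors, then map each
	 * page fragment in turn
	 */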
3043	for (;;) {
3044		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3045			tx_desc->read.cmd_type_len =
3046				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3047
3048			i++;
3049			tx_desc++;
3050			if (i == tx_ring->count) {
3051				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3052				i = 0;
3053			}
3054
3055			dma += IXGBE_MAX_DATA_PER_TXD;
3056			size -= IXGBE_MAX_DATA_PER_TXD;
3057
3058			tx_desc->read.buffer_addr = cpu_to_le64(dma);
3059			tx_desc->read.olinfo_status = 0;
3060		}
3061
3062		if (likely(!data_len))
3063			break;
3064
3065		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3066
3067		i++;
3068		tx_desc++;
3069		if (i == tx_ring->count) {
3070			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3071			i = 0;
3072		}
3073
3074		size = skb_frag_size(frag);
3075		data_len -= size;
3076
3077		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3078				       DMA_TO_DEVICE);
3079		if (dma_mapping_error(tx_ring->dev, dma))
3080			goto dma_error;
3081
3082		tx_buffer = &tx_ring->tx_buffer_info[i];
3083		dma_unmap_len_set(tx_buffer, len, size);
3084		dma_unmap_addr_set(tx_buffer, dma, dma);
3085
3086		tx_desc->read.buffer_addr = cpu_to_le64(dma);
3087		tx_desc->read.olinfo_status = 0;
3088
3089		frag++;
3090	}
3091
3092	/* write last descriptor with RS and EOP bits */
3093	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3094	tx_desc->read.cmd_type_len = cmd_type;
3095
3096	/* set the timestamp */
3097	first->time_stamp = jiffies;
3098
3099	/* Force memory writes to complete before letting h/w know there
3100	 * are new descriptors to fetch.  (Only applicable for weak-ordered
3101	 * memory model archs, such as IA-64).
3102	 *
3103	 * We also need this memory barrier (wmb) to make certain all of the
3104	 * status bits have been updated before next_to_watch is written.
3105	 */
3106	wmb();
3107
3108	/* set next_to_watch value indicating a packet is present */
3109	first->next_to_watch = tx_desc;
3110
3111	i++;
3112	if (i == tx_ring->count)
3113		i = 0;
3114
3115	tx_ring->next_to_use = i;
3116
3117	/* notify HW of packet */
3118	ixgbevf_write_tail(tx_ring, i);
3119
3120	return;
3121dma_error:
3122	dev_err(tx_ring->dev, "TX DMA map failed\n");
3123
3124	/* clear dma mappings for failed tx_buffer_info map */
3125	for (;;) {
3126		tx_buffer = &tx_ring->tx_buffer_info[i];
3127		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3128		if (tx_buffer == first)
3129			break;
3130		if (i == 0)
3131			i = tx_ring->count;
3132		i--;
3133	}
3134
3135	tx_ring->next_to_use = i;
3136}
3137
3138static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3139{
3140	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3141	/* Herbert's original patch had:
3142	 *  smp_mb__after_netif_stop_queue();
3143	 * but since that doesn't exist yet, just open code it. */
3144	smp_mb();
3145
3146	/* We need to check again in case another CPU has just
3147	 * made room available. */
3148	if (likely(ixgbevf_desc_unused(tx_ring) < size))
3149		return -EBUSY;
3150
3151	/* A reprieve! - use start_queue because it doesn't call schedule */
3152	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3153	++tx_ring->tx_stats.restart_queue;
3154
3155	return 0;
3156}
3157
3158static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3159{
3160	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3161		return 0;
3162	return __ixgbevf_maybe_stop_tx(tx_ring, size);
3163}
3164
3165static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3166{
3167	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3168	struct ixgbevf_tx_buffer *first;
3169	struct ixgbevf_ring *tx_ring;
3170	int tso;
3171	u32 tx_flags = 0;
3172	u16 count = TXD_USE_COUNT(skb_headlen(skb));
3173#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3174	unsigned short f;
3175#endif
3176	u8 hdr_len = 0;
3177	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3178
3179	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3180		dev_kfree_skb(skb);
3181		return NETDEV_TX_OK;
3182	}
3183
3184	tx_ring = adapter->tx_ring[skb->queue_mapping];
3185
3186	/*
3187	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3188	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3189	 *       + 2 desc gap to keep tail from touching head,
3190	 *       + 1 desc for context descriptor,
3191	 * otherwise try next time
3192	 */
3193#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3194	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3195		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3196#else
3197	count += skb_shinfo(skb)->nr_frags;
3198#endif
3199	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3200		tx_ring->tx_stats.tx_busy++;
3201		return NETDEV_TX_BUSY;
3202	}
3203
3204	/* record the location of the first descriptor for this packet */
3205	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3206	first->skb = skb;
3207	first->bytecount = skb->len;
3208	first->gso_segs = 1;
3209
3210	if (vlan_tx_tag_present(skb)) {
3211		tx_flags |= vlan_tx_tag_get(skb);
3212		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3213		tx_flags |= IXGBE_TX_FLAGS_VLAN;
3214	}
3215
3216	/* record initial flags and protocol */
3217	first->tx_flags = tx_flags;
3218	first->protocol = vlan_get_protocol(skb);
3219
3220	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3221	if (tso < 0)
3222		goto out_drop;
3223	else if (!tso)
3224		ixgbevf_tx_csum(tx_ring, first);
3225
3226	ixgbevf_tx_map(tx_ring, first, hdr_len);
3227
3228	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3229
3230	return NETDEV_TX_OK;
3231
3232out_drop:
3233	dev_kfree_skb_any(first->skb);
3234	first->skb = NULL;
3235
3236	return NETDEV_TX_OK;
3237}
3238
3239/**
3240 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3241 * @netdev: network interface device structure
3242 * @p: pointer to an address structure
3243 *
3244 * Returns 0 on success, negative on failure
3245 **/
3246static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3247{
3248	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3249	struct ixgbe_hw *hw = &adapter->hw;
3250	struct sockaddr *addr = p;
3251
3252	if (!is_valid_ether_addr(addr->sa_data))
3253		return -EADDRNOTAVAIL;
3254
3255	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3256	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3257
3258	spin_lock_bh(&adapter->mbx_lock);
3259
3260	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3261
3262	spin_unlock_bh(&adapter->mbx_lock);
3263
3264	return 0;
3265}
3266
3267/**
3268 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3269 * @netdev: network interface device structure
3270 * @new_mtu: new value for maximum frame size
3271 *
3272 * Returns 0 on success, negative on failure
3273 **/
3274static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3275{
3276	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3277	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3278	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3279
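	/* jumbo frames are only allowed when the PF supports mailbox API 1.1
	 * or this is an X540 VF; e.g. the default 1500 byte MTU gives a
	 * max_frame of 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes
	 */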
3280	switch (adapter->hw.api_version) {
3281	case ixgbe_mbox_api_11:
3282		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3283		break;
3284	default:
3285		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3286			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3287		break;
3288	}
3289
3290	/* MTU < 68 is an error and causes problems on some kernels */
3291	if ((new_mtu < 68) || (max_frame > max_possible_frame))
3292		return -EINVAL;
3293
3294	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3295	       netdev->mtu, new_mtu);
3296	/* must set new MTU before calling down or up */
3297	netdev->mtu = new_mtu;
3298
3299	if (netif_running(netdev))
3300		ixgbevf_reinit_locked(adapter);
3301
3302	return 0;
3303}
3304
3305static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3306{
3307	struct net_device *netdev = pci_get_drvdata(pdev);
3308	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3309#ifdef CONFIG_PM
3310	int retval = 0;
3311#endif
3312
3313	netif_device_detach(netdev);
3314
3315	if (netif_running(netdev)) {
3316		rtnl_lock();
3317		ixgbevf_down(adapter);
3318		ixgbevf_free_irq(adapter);
3319		ixgbevf_free_all_tx_resources(adapter);
3320		ixgbevf_free_all_rx_resources(adapter);
3321		rtnl_unlock();
3322	}
3323
3324	ixgbevf_clear_interrupt_scheme(adapter);
3325
3326#ifdef CONFIG_PM
3327	retval = pci_save_state(pdev);
3328	if (retval)
3329		return retval;
3330
3331#endif
3332	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3333		pci_disable_device(pdev);
3334
3335	return 0;
3336}
3337
3338#ifdef CONFIG_PM
3339static int ixgbevf_resume(struct pci_dev *pdev)
3340{
3341	struct net_device *netdev = pci_get_drvdata(pdev);
3342	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3343	u32 err;
3344
3345	pci_restore_state(pdev);
3346	/*
3347	 * pci_restore_state clears dev->state_saved so call
3348	 * pci_save_state to restore it.
3349	 */
3350	pci_save_state(pdev);
3351
3352	err = pci_enable_device_mem(pdev);
3353	if (err) {
3354		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3355		return err;
3356	}
3357	smp_mb__before_clear_bit();
3358	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3359	pci_set_master(pdev);
3360
3361	ixgbevf_reset(adapter);
3362
3363	rtnl_lock();
3364	err = ixgbevf_init_interrupt_scheme(adapter);
3365	rtnl_unlock();
3366	if (err) {
3367		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3368		return err;
3369	}
3370
3371	if (netif_running(netdev)) {
3372		err = ixgbevf_open(netdev);
3373		if (err)
3374			return err;
3375	}
3376
3377	netif_device_attach(netdev);
3378
3379	return err;
3380}
3381
3382#endif /* CONFIG_PM */
3383static void ixgbevf_shutdown(struct pci_dev *pdev)
3384{
3385	ixgbevf_suspend(pdev, PMSG_SUSPEND);
3386}
3387
3388static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3389						struct rtnl_link_stats64 *stats)
3390{
3391	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3392	unsigned int start;
3393	u64 bytes, packets;
3394	const struct ixgbevf_ring *ring;
3395	int i;
3396
3397	ixgbevf_update_stats(adapter);
3398
3399	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3400
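	/* per-ring byte/packet counters are protected by a u64_stats sync
	 * point; loop until a consistent snapshot is read
	 */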
3401	for (i = 0; i < adapter->num_rx_queues; i++) {
3402		ring = adapter->rx_ring[i];
3403		do {
3404			start = u64_stats_fetch_begin_irq(&ring->syncp);
3405			bytes = ring->stats.bytes;
3406			packets = ring->stats.packets;
3407		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3408		stats->rx_bytes += bytes;
3409		stats->rx_packets += packets;
3410	}
3411
3412	for (i = 0; i < adapter->num_tx_queues; i++) {
3413		ring = adapter->tx_ring[i];
3414		do {
3415			start = u64_stats_fetch_begin_irq(&ring->syncp);
3416			bytes = ring->stats.bytes;
3417			packets = ring->stats.packets;
3418		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3419		stats->tx_bytes += bytes;
3420		stats->tx_packets += packets;
3421	}
3422
3423	return stats;
3424}
3425
3426static const struct net_device_ops ixgbevf_netdev_ops = {
3427	.ndo_open		= ixgbevf_open,
3428	.ndo_stop		= ixgbevf_close,
3429	.ndo_start_xmit		= ixgbevf_xmit_frame,
3430	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
3431	.ndo_get_stats64	= ixgbevf_get_stats,
3432	.ndo_validate_addr	= eth_validate_addr,
3433	.ndo_set_mac_address	= ixgbevf_set_mac,
3434	.ndo_change_mtu		= ixgbevf_change_mtu,
3435	.ndo_tx_timeout		= ixgbevf_tx_timeout,
3436	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
3437	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
3438#ifdef CONFIG_NET_RX_BUSY_POLL
3439	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
3440#endif
3441};
3442
3443static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3444{
3445	dev->netdev_ops = &ixgbevf_netdev_ops;
3446	ixgbevf_set_ethtool_ops(dev);
3447	dev->watchdog_timeo = 5 * HZ;
3448}
3449
3450/**
3451 * ixgbevf_probe - Device Initialization Routine
3452 * @pdev: PCI device information struct
3453 * @ent: entry in ixgbevf_pci_tbl
3454 *
3455 * Returns 0 on success, negative on failure
3456 *
3457 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3458 * The OS initialization, configuring of the adapter private structure,
3459 * and a hardware reset occur.
3460 **/
3461static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3462{
3463	struct net_device *netdev;
3464	struct ixgbevf_adapter *adapter = NULL;
3465	struct ixgbe_hw *hw = NULL;
3466	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3467	static int cards_found;
3468	int err, pci_using_dac;
3469
3470	err = pci_enable_device(pdev);
3471	if (err)
3472		return err;
3473
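	/* prefer 64-bit DMA, fall back to a 32-bit mask if that fails */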
3474	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3475		pci_using_dac = 1;
3476	} else {
3477		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3478		if (err) {
3479			dev_err(&pdev->dev, "No usable DMA "
3480				"configuration, aborting\n");
3481			goto err_dma;
3482		}
3483		pci_using_dac = 0;
3484	}
3485
3486	err = pci_request_regions(pdev, ixgbevf_driver_name);
3487	if (err) {
3488		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3489		goto err_pci_reg;
3490	}
3491
3492	pci_set_master(pdev);
3493
3494	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3495				   MAX_TX_QUEUES);
3496	if (!netdev) {
3497		err = -ENOMEM;
3498		goto err_alloc_etherdev;
3499	}
3500
3501	SET_NETDEV_DEV(netdev, &pdev->dev);
3502
3503	pci_set_drvdata(pdev, netdev);
3504	adapter = netdev_priv(netdev);
3505
3506	adapter->netdev = netdev;
3507	adapter->pdev = pdev;
3508	hw = &adapter->hw;
3509	hw->back = adapter;
3510	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3511
3512	/*
3513	 * call save state here in standalone driver because it relies on
3514	 * adapter struct to exist, and needs to call netdev_priv
3515	 */
3516	pci_save_state(pdev);
3517
3518	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3519			      pci_resource_len(pdev, 0));
3520	adapter->io_addr = hw->hw_addr;
3521	if (!hw->hw_addr) {
3522		err = -EIO;
3523		goto err_ioremap;
3524	}
3525
3526	ixgbevf_assign_netdev_ops(netdev);
3527
3528	adapter->bd_number = cards_found;
3529
3530	/* Setup hw api */
3531	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3532	hw->mac.type  = ii->mac;
3533
3534	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3535	       sizeof(struct ixgbe_mbx_operations));
3536
3537	/* setup the private structure */
3538	err = ixgbevf_sw_init(adapter);
3539	if (err)
3540		goto err_sw_init;
3541
3542	/* The HW MAC address was set and/or determined in sw_init */
3543	if (!is_valid_ether_addr(netdev->dev_addr)) {
3544		pr_err("invalid MAC address\n");
3545		err = -EIO;
3546		goto err_sw_init;
3547	}
3548
3549	netdev->hw_features = NETIF_F_SG |
3550			   NETIF_F_IP_CSUM |
3551			   NETIF_F_IPV6_CSUM |
3552			   NETIF_F_TSO |
3553			   NETIF_F_TSO6 |
3554			   NETIF_F_RXCSUM;
3555
3556	netdev->features = netdev->hw_features |
3557			   NETIF_F_HW_VLAN_CTAG_TX |
3558			   NETIF_F_HW_VLAN_CTAG_RX |
3559			   NETIF_F_HW_VLAN_CTAG_FILTER;
3560
3561	netdev->vlan_features |= NETIF_F_TSO;
3562	netdev->vlan_features |= NETIF_F_TSO6;
3563	netdev->vlan_features |= NETIF_F_IP_CSUM;
3564	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3565	netdev->vlan_features |= NETIF_F_SG;
3566
3567	if (pci_using_dac)
3568		netdev->features |= NETIF_F_HIGHDMA;
3569
3570	netdev->priv_flags |= IFF_UNICAST_FLT;
3571
3572	init_timer(&adapter->watchdog_timer);
3573	adapter->watchdog_timer.function = ixgbevf_watchdog;
3574	adapter->watchdog_timer.data = (unsigned long)adapter;
3575
3576	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3577	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3578
3579	err = ixgbevf_init_interrupt_scheme(adapter);
3580	if (err)
3581		goto err_sw_init;
3582
3583	strcpy(netdev->name, "eth%d");
3584
3585	err = register_netdev(netdev);
3586	if (err)
3587		goto err_register;
3588
3589	netif_carrier_off(netdev);
3590
3591	ixgbevf_init_last_counter_stats(adapter);
3592
3593	/* print the MAC address */
3594	hw_dbg(hw, "%pM\n", netdev->dev_addr);
3595
3596	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3597
3598	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3599	cards_found++;
3600	return 0;
3601
3602err_register:
3603	ixgbevf_clear_interrupt_scheme(adapter);
3604err_sw_init:
3605	ixgbevf_reset_interrupt_capability(adapter);
3606	iounmap(adapter->io_addr);
3607err_ioremap:
3608	free_netdev(netdev);
3609err_alloc_etherdev:
3610	pci_release_regions(pdev);
3611err_pci_reg:
3612err_dma:
3613	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3614		pci_disable_device(pdev);
3615	return err;
3616}
3617
3618/**
3619 * ixgbevf_remove - Device Removal Routine
3620 * @pdev: PCI device information struct
3621 *
3622 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3623 * that it should release a PCI device.  This could be caused by a
3624 * Hot-Plug event, or because the driver is going to be removed from
3625 * memory.
3626 **/
3627static void ixgbevf_remove(struct pci_dev *pdev)
3628{
3629	struct net_device *netdev = pci_get_drvdata(pdev);
3630	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3631
3632	set_bit(__IXGBEVF_REMOVING, &adapter->state);
3633
3634	del_timer_sync(&adapter->watchdog_timer);
3635
3636	cancel_work_sync(&adapter->reset_task);
3637	cancel_work_sync(&adapter->watchdog_task);
3638
3639	if (netdev->reg_state == NETREG_REGISTERED)
3640		unregister_netdev(netdev);
3641
3642	ixgbevf_clear_interrupt_scheme(adapter);
3643	ixgbevf_reset_interrupt_capability(adapter);
3644
3645	iounmap(adapter->io_addr);
3646	pci_release_regions(pdev);
3647
3648	hw_dbg(&adapter->hw, "Remove complete\n");
3649
3650	free_netdev(netdev);
3651
3652	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3653		pci_disable_device(pdev);
3654}
3655
3656/**
3657 * ixgbevf_io_error_detected - called when PCI error is detected
3658 * @pdev: Pointer to PCI device
3659 * @state: The current pci connection state
3660 *
3661 * This function is called after a PCI bus error affecting
3662 * this device has been detected.
3663 */
3664static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3665						  pci_channel_state_t state)
3666{
3667	struct net_device *netdev = pci_get_drvdata(pdev);
3668	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3669
3670	rtnl_lock();
3671	netif_device_detach(netdev);
3672
3673	if (state == pci_channel_io_perm_failure) {
3674		rtnl_unlock();
3675		return PCI_ERS_RESULT_DISCONNECT;
3676	}
3677
3678	if (netif_running(netdev))
3679		ixgbevf_down(adapter);
3680
3681	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3682		pci_disable_device(pdev);
3683	rtnl_unlock();
3684
3685	/* Request a slot reset. */
3686	return PCI_ERS_RESULT_NEED_RESET;
3687}
3688
3689/**
3690 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3691 * @pdev: Pointer to PCI device
3692 *
3693 * Restart the card from scratch, as if from a cold-boot. Implementation
3694 * resembles the first-half of the ixgbevf_resume routine.
3695 */
3696static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3697{
3698	struct net_device *netdev = pci_get_drvdata(pdev);
3699	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3700
3701	if (pci_enable_device_mem(pdev)) {
3702		dev_err(&pdev->dev,
3703			"Cannot re-enable PCI device after reset.\n");
3704		return PCI_ERS_RESULT_DISCONNECT;
3705	}
3706
3707	smp_mb__before_clear_bit();
3708	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3709	pci_set_master(pdev);
3710
3711	ixgbevf_reset(adapter);
3712
3713	return PCI_ERS_RESULT_RECOVERED;
3714}
3715
3716/**
3717 * ixgbevf_io_resume - called when traffic can start flowing again.
3718 * @pdev: Pointer to PCI device
3719 *
3720 * This callback is called when the error recovery driver tells us that
3721 * it's OK to resume normal operation. Implementation resembles the
3722 * second-half of the ixgbevf_resume routine.
3723 */
3724static void ixgbevf_io_resume(struct pci_dev *pdev)
3725{
3726	struct net_device *netdev = pci_get_drvdata(pdev);
3727	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3728
3729	if (netif_running(netdev))
3730		ixgbevf_up(adapter);
3731
3732	netif_device_attach(netdev);
3733}
3734
3735/* PCI Error Recovery (ERS) */
3736static const struct pci_error_handlers ixgbevf_err_handler = {
3737	.error_detected = ixgbevf_io_error_detected,
3738	.slot_reset = ixgbevf_io_slot_reset,
3739	.resume = ixgbevf_io_resume,
3740};
3741
3742static struct pci_driver ixgbevf_driver = {
3743	.name     = ixgbevf_driver_name,
3744	.id_table = ixgbevf_pci_tbl,
3745	.probe    = ixgbevf_probe,
3746	.remove   = ixgbevf_remove,
3747#ifdef CONFIG_PM
3748	/* Power Management Hooks */
3749	.suspend  = ixgbevf_suspend,
3750	.resume   = ixgbevf_resume,
3751#endif
3752	.shutdown = ixgbevf_shutdown,
3753	.err_handler = &ixgbevf_err_handler
3754};
3755
3756/**
3757 * ixgbevf_init_module - Driver Registration Routine
3758 *
3759 * ixgbevf_init_module is the first routine called when the driver is
3760 * loaded. All it does is register with the PCI subsystem.
3761 **/
3762static int __init ixgbevf_init_module(void)
3763{
3764	int ret;
3765	pr_info("%s - version %s\n", ixgbevf_driver_string,
3766		ixgbevf_driver_version);
3767
3768	pr_info("%s\n", ixgbevf_copyright);
3769
3770	ret = pci_register_driver(&ixgbevf_driver);
3771	return ret;
3772}
3773
3774module_init(ixgbevf_init_module);
3775
3776/**
3777 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3778 *
3779 * ixgbevf_exit_module is called just before the driver is removed
3780 * from memory.
3781 **/
3782static void __exit ixgbevf_exit_module(void)
3783{
3784	pci_unregister_driver(&ixgbevf_driver);
3785}
3786
3787#ifdef DEBUG
3788/**
3789 * ixgbevf_get_hw_dev_name - return device name string
3790 * used by hardware layer to print debugging information
3791 **/
3792char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3793{
3794	struct ixgbevf_adapter *adapter = hw->back;
3795	return adapter->netdev->name;
3796}
3797
3798#endif
3799module_exit(ixgbevf_exit_module);
3800
3801/* ixgbevf_main.c */
3802