ixgbevf_main.c revision 3938d3c8fdffc95ebc0a3e2708d91a726fd671ec
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
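
/*
 * IVAR layout example for ixgbevf_set_ivar() above: each VTIVAR register
 * packs four 8-bit entries, two queues per register.  For Rx queue 3
 * (direction 0), index = 16 * (3 & 1) + 8 * 0 = 16, so the vector lands
 * in bits 23:16 of VTIVAR(3 >> 1) = VTIVAR(1); the Tx entry for the same
 * queue (direction 1) occupies bits 31:24 of that register.
 */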

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

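/*
 * Worked example for the macros above: a single 9000-byte buffer costs
 * TXD_USE_COUNT(9000) = DIV_ROUND_UP(9000, 16384) = 1 descriptor, since
 * one descriptor can carry up to 2^14 bytes.  DESC_NEEDED then budgets,
 * roughly, one descriptor per possible page fragment plus four slots of
 * slack for the linear head and a context descriptor.
 */
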
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: rx ring the packet arrived on
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers; if we did, drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    !(compare_ether_addr(adapter->netdev->dev_addr,
					eth_hdr(skb)->h_source))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}
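
/*
 * Note on the drop path above: non-EOP descriptors get their buffers
 * linked into a chain via skb->next, but this driver does not support
 * multi-buffer frames, so once EOP arrives any chain that formed is
 * simply freed instead of being passed up the stack.
 */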

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
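
/*
 * Budget split example for ixgbevf_poll(): with a NAPI budget of 64 and
 * three Rx rings on one vector, each ring is polled with
 * max(64 / 3, 1) = 21 packets of budget, so one busy ring cannot starve
 * its siblings within a single poll.
 */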

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
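
/*
 * Units in ixgbevf_update_itr() above: q_vector->itr holds the EITR
 * interval already shifted for the register (2 usec granularity in bits
 * 11:3), so "itr >> 2" recovers microseconds.  E.g. IXGBE_20K_ITR (200)
 * is a 50 usec timeslice; cleaning 3000 bytes in that window gives
 * bytes_perint = 60, which the thresholds above classify as bulk_latency.
 */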

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
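
/*
 * The smoothing above damps large ITR swings.  Stepping from
 * IXGBE_20K_ITR (200) toward IXGBE_8K_ITR (500) yields
 * (10 * 500 * 200) / (9 * 500 + 200) = 212, so the interval creeps
 * toward the new target over several updates rather than snapping to it.
 */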

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - MSI-X vector handler for Rx/Tx ring cleanup
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
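
/*
 * Distribution example for the constrained case above: 4 Rx queues on 3
 * vectors gives rqpv = DIV_ROUND_UP(4, 3) = 2 on vector 0 (rx 0-1), then
 * DIV_ROUND_UP(2, 2) = 1 on vector 1 (rx 2) and DIV_ROUND_UP(1, 1) = 1
 * on vector 2 (rx 3); recomputing rqpv each pass absorbs the remainder
 * on the earliest vectors.
 */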

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
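
/*
 * SRRCTL sizing example: BSIZEPKT is expressed in 1 KB units
 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 on these parts), so a 2 KB
 * rx_buf_len programs ALIGN(2048, 1024) >> 10 = 2 into the field.
 */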

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}
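
/*
 * Example: at the default 1500-byte MTU, max_frame starts at 1518
 * (ETH_HLEN + ETH_FCS_LEN included) and becomes 1522 once VLAN_HLEN is
 * added, so an X540 VF stays on the small MAXIMUM_ETHERNET_VLAN_SIZE
 * buffer while larger MTUs step up through the 2K/4K/8K/10K buckets.
 */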

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
				adapter->rx_ring[rxr].count - 1);
}
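
/*
 * Note on the tail write above: programming RDT to count - 1 hands all
 * but one descriptor to hardware; keeping one slot empty is what lets
 * head == tail unambiguously mean "ring empty" rather than "ring full".
 */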

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;

		/* allocate resources on the ring */
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
			}
			kfree(rx_ring);
			return err;
		}
	}

	/* free the existing rings and queues */
	ixgbevf_free_all_rx_resources(adapter);
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	/* reset ring to vector mapping */
	ixgbevf_reset_q_vectors(adapter);
	ixgbevf_map_rings_to_vectors(adapter);

	return 0;
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_reset_queues(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int err = 0;
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}

	return err;
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
1835	 * than CPUs.  So let's be conservative and only ask for
1836	 * (roughly) the same number of vectors as there are CPUs.
1837	 * The default is to use pairs of vectors.
1838	 */
1839	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1840	v_budget = min_t(int, v_budget, num_online_cpus());
1841	v_budget += NON_Q_VECTORS;
1842
1843	/* A failure in MSI-X entry allocation isn't fatal, but it does
1844	 * mean we disable MSI-X capabilities of the adapter. */
1845	adapter->msix_entries = kcalloc(v_budget,
1846					sizeof(struct msix_entry), GFP_KERNEL);
1847	if (!adapter->msix_entries) {
1848		err = -ENOMEM;
1849		goto out;
1850	}
1851
1852	for (vector = 0; vector < v_budget; vector++)
1853		adapter->msix_entries[vector].entry = vector;
1854
1855	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1856	if (err)
1857		goto out;
1858
1859	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1860	if (err)
1861		goto out;
1862
1863	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1864
1865out:
1866	return err;
1867}
1868
1869/**
1870 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1871 * @adapter: board private structure to initialize
1872 *
1873 * We allocate one q_vector per queue interrupt.  If allocation fails we
1874 * return -ENOMEM.
1875 **/
1876static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1877{
1878	int q_idx, num_q_vectors;
1879	struct ixgbevf_q_vector *q_vector;
1880
1881	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1882
1883	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1884		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1885		if (!q_vector)
1886			goto err_out;
1887		q_vector->adapter = adapter;
1888		q_vector->v_idx = q_idx;
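		/* 64 is the conventional NAPI poll weight (max packets per poll) */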
1889		netif_napi_add(adapter->netdev, &q_vector->napi,
1890			       ixgbevf_poll, 64);
1891		adapter->q_vector[q_idx] = q_vector;
1892	}
1893
1894	return 0;
1895
1896err_out:
1897	while (q_idx) {
1898		q_idx--;
1899		q_vector = adapter->q_vector[q_idx];
1900		netif_napi_del(&q_vector->napi);
1901		kfree(q_vector);
1902		adapter->q_vector[q_idx] = NULL;
1903	}
1904	return -ENOMEM;
1905}
1906
1907/**
1908 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1909 * @adapter: board private structure to initialize
1910 *
1911 * This function frees the memory allocated to the q_vectors.  In addition if
1912 * NAPI is enabled it will delete any references to the NAPI struct prior
1913 * to freeing the q_vector.
1914 **/
1915static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1916{
1917	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1918
1919	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1920		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1921
1922		adapter->q_vector[q_idx] = NULL;
1923		netif_napi_del(&q_vector->napi);
1924		kfree(q_vector);
1925	}
1926}
1927
1928/**
1929 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1930 * @adapter: board private structure
1931 *
1932 **/
1933static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1934{
1935	pci_disable_msix(adapter->pdev);
1936	kfree(adapter->msix_entries);
1937	adapter->msix_entries = NULL;
1938}
1939
1940/**
1941 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1942 * @adapter: board private structure to initialize
1943 *
1944 **/
1945static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1946{
1947	int err;
1948
1949	/* Number of supported queues */
1950	ixgbevf_set_num_queues(adapter);
1951
1952	err = ixgbevf_set_interrupt_capability(adapter);
1953	if (err) {
1954		hw_dbg(&adapter->hw,
1955		       "Unable to setup interrupt capabilities\n");
1956		goto err_set_interrupt;
1957	}
1958
1959	err = ixgbevf_alloc_q_vectors(adapter);
1960	if (err) {
1961		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1962		       "vectors\n");
1963		goto err_alloc_q_vectors;
1964	}
1965
1966	err = ixgbevf_alloc_queues(adapter);
1967	if (err) {
1968		pr_err("Unable to allocate memory for queues\n");
1969		goto err_alloc_queues;
1970	}
1971
1972	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
1973	       "Tx Queue count = %u\n",
1974	       (adapter->num_rx_queues > 1) ? "Enabled" :
1975	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
1976
1977	set_bit(__IXGBEVF_DOWN, &adapter->state);
1978
1979	return 0;
1980err_alloc_queues:
1981	ixgbevf_free_q_vectors(adapter);
1982err_alloc_q_vectors:
1983	ixgbevf_reset_interrupt_capability(adapter);
1984err_set_interrupt:
1985	return err;
1986}
1987
1988/**
1989 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
1990 * @adapter: board private structure to clear interrupt scheme on
1991 *
1992 * We go through and clear interrupt specific resources and reset the structure
1993 * to pre-load conditions
1994 **/
1995static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1996{
1997	adapter->num_tx_queues = 0;
1998	adapter->num_rx_queues = 0;
1999
2000	ixgbevf_free_q_vectors(adapter);
2001	ixgbevf_reset_interrupt_capability(adapter);
2002}
2003
2004/**
2005 * ixgbevf_sw_init - Initialize general software structures
2006 * (struct ixgbevf_adapter)
2007 * @adapter: board private structure to initialize
2008 *
2009 * ixgbevf_sw_init initializes the Adapter private data structure.
2010 * Fields are initialized based on PCI device information and
2011 * OS network device settings (MTU size).
2012 **/
2013static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2014{
2015	struct ixgbe_hw *hw = &adapter->hw;
2016	struct pci_dev *pdev = adapter->pdev;
2017	int err;
2018
2019	/* PCI config space info */
2020
2021	hw->vendor_id = pdev->vendor;
2022	hw->device_id = pdev->device;
2023	hw->revision_id = pdev->revision;
2024	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2025	hw->subsystem_device_id = pdev->subsystem_device;
2026
2027	hw->mbx.ops.init_params(hw);
2028
2029	/* assume legacy case in which PF would only give VF 2 queues */
2030	hw->mac.max_tx_queues = 2;
2031	hw->mac.max_rx_queues = 2;
2032
2033	err = hw->mac.ops.reset_hw(hw);
2034	if (err) {
2035		dev_info(&pdev->dev,
2036		         "PF still in reset state, assigning new address\n");
2037		eth_hw_addr_random(adapter->netdev);
2038		memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2039			adapter->netdev->addr_len);
2040	} else {
2041		err = hw->mac.ops.init_hw(hw);
2042		if (err) {
2043			pr_err("init_shared_code failed: %d\n", err);
2044			goto out;
2045		}
2046		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
2047		       adapter->netdev->addr_len);
2048	}
2049
2050	/* lock to protect mailbox accesses */
2051	spin_lock_init(&adapter->mbx_lock);
2052
2053	/* Enable dynamic interrupt throttling rates */
2054	adapter->rx_itr_setting = 1;
2055	adapter->tx_itr_setting = 1;
2056
2057	/* set default ring sizes */
2058	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2059	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2060
2061	set_bit(__IXGBEVF_DOWN, &adapter->state);
2062	return 0;
2063
2064out:
2065	return err;
2066}
2067
2068#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
2069	{							\
2070		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
2071		if (current_counter < last_counter)		\
2072			counter += 0x100000000LL;		\
2073		last_counter = current_counter;			\
2074		counter &= 0xFFFFFFFF00000000LL;		\
2075		counter |= current_counter;			\
2076	}
2077
2078#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2079	{								 \
2080		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
2081		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
2082		u64 current_counter = (current_counter_msb << 32) |      \
2083			current_counter_lsb;                             \
2084		if (current_counter < last_counter)			 \
2085			counter += 0x1000000000LL;			 \
2086		last_counter = current_counter;				 \
2087		counter &= 0xFFFFFFF000000000LL;			 \
2088		counter |= current_counter;				 \
2089	}
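
/* Example: if the 36-bit octet counter wraps from 0xFFFFFFFFF to
 * 0x000000004, current_counter reads back smaller than last_counter,
 * so 2^36 (0x1000000000) is carried into the upper bits of the 64-bit
 * software counter before the low 36 bits are spliced back in.
 */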
2090/**
2091 * ixgbevf_update_stats - Update the board statistics counters.
2092 * @adapter: board private structure
2093 **/
2094void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2095{
2096	struct ixgbe_hw *hw = &adapter->hw;
2097
2098	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2099				adapter->stats.vfgprc);
2100	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2101				adapter->stats.vfgptc);
2102	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2103				adapter->stats.last_vfgorc,
2104				adapter->stats.vfgorc);
2105	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2106				adapter->stats.last_vfgotc,
2107				adapter->stats.vfgotc);
2108	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2109				adapter->stats.vfmprc);
2110}
2111
2112/**
2113 * ixgbevf_watchdog - Timer Call-back
2114 * @data: pointer to adapter cast into an unsigned long
2115 **/
2116static void ixgbevf_watchdog(unsigned long data)
2117{
2118	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2119	struct ixgbe_hw *hw = &adapter->hw;
2120	u32 eics = 0;
2121	int i;
2122
2123	/*
2124	 * Do the watchdog outside of interrupt context due to the lovely
2125	 * delays that some of the newer hardware requires
2126	 */
2127
2128	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2129		goto watchdog_short_circuit;
2130
2131	/* get one bit for every active tx/rx interrupt vector */
2132	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2133		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2134		if (qv->rx.ring || qv->tx.ring)
2135			eics |= 1 << i;
2136	}
2137
2138	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2139
2140watchdog_short_circuit:
2141	schedule_work(&adapter->watchdog_task);
2142}
2143
2144/**
2145 * ixgbevf_tx_timeout - Respond to a Tx Hang
2146 * @netdev: network interface device structure
2147 **/
2148static void ixgbevf_tx_timeout(struct net_device *netdev)
2149{
2150	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2151
2152	/* Do the reset outside of interrupt context */
2153	schedule_work(&adapter->reset_task);
2154}
2155
2156static void ixgbevf_reset_task(struct work_struct *work)
2157{
2158	struct ixgbevf_adapter *adapter;
2159	adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2160
2161	/* If we're already down or resetting, just bail */
2162	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2163	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
2164		return;
2165
2166	adapter->tx_timeout_count++;
2167
2168	ixgbevf_reinit_locked(adapter);
2169}
2170
2171/**
2172 * ixgbevf_watchdog_task - worker thread to bring link up
2173 * @work: pointer to work_struct containing our data
2174 **/
2175static void ixgbevf_watchdog_task(struct work_struct *work)
2176{
2177	struct ixgbevf_adapter *adapter = container_of(work,
2178						       struct ixgbevf_adapter,
2179						       watchdog_task);
2180	struct net_device *netdev = adapter->netdev;
2181	struct ixgbe_hw *hw = &adapter->hw;
2182	u32 link_speed = adapter->link_speed;
2183	bool link_up = adapter->link_up;
2184	s32 need_reset;
2185
2186	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2187
2188	/*
2189	 * Always check the link on the watchdog because we have
2190	 * no LSC interrupt
2191	 */
2192	spin_lock_bh(&adapter->mbx_lock);
2193
2194	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2195
2196	spin_unlock_bh(&adapter->mbx_lock);
2197
2198	if (need_reset) {
2199		adapter->link_up = link_up;
2200		adapter->link_speed = link_speed;
2201		netif_carrier_off(netdev);
2202		netif_tx_stop_all_queues(netdev);
2203		schedule_work(&adapter->reset_task);
2204		goto pf_has_reset;
2205	}
2206	adapter->link_up = link_up;
2207	adapter->link_speed = link_speed;
2208
2209	if (link_up) {
2210		if (!netif_carrier_ok(netdev)) {
2211			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2212			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2213			       10 : 1);
2214			netif_carrier_on(netdev);
2215			netif_tx_wake_all_queues(netdev);
2216		}
2217	} else {
2218		adapter->link_up = false;
2219		adapter->link_speed = 0;
2220		if (netif_carrier_ok(netdev)) {
2221			hw_dbg(&adapter->hw, "NIC Link is Down\n");
2222			netif_carrier_off(netdev);
2223			netif_tx_stop_all_queues(netdev);
2224		}
2225	}
2226
2227	ixgbevf_update_stats(adapter);
2228
2229pf_has_reset:
2230	/* Reset the timer */
2231	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2232		mod_timer(&adapter->watchdog_timer,
2233			  round_jiffies(jiffies + (2 * HZ)));
2234
2235	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2236}
2237
2238/**
2239 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2240 * @adapter: board private structure
2241 * @tx_ring: Tx descriptor ring for a specific queue
2242 *
2243 * Free all transmit software resources
2244 **/
2245void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2246			       struct ixgbevf_ring *tx_ring)
2247{
2248	struct pci_dev *pdev = adapter->pdev;
2249
2250	ixgbevf_clean_tx_ring(adapter, tx_ring);
2251
2252	vfree(tx_ring->tx_buffer_info);
2253	tx_ring->tx_buffer_info = NULL;
2254
2255	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2256			  tx_ring->dma);
2257
2258	tx_ring->desc = NULL;
2259}
2260
2261/**
2262 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2263 * @adapter: board private structure
2264 *
2265 * Free all transmit software resources
2266 **/
2267static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2268{
2269	int i;
2270
2271	for (i = 0; i < adapter->num_tx_queues; i++)
2272		if (adapter->tx_ring[i].desc)
2273			ixgbevf_free_tx_resources(adapter,
2274						  &adapter->tx_ring[i]);
2276}
2277
2278/**
2279 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2280 * @adapter: board private structure
2281 * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2282 *
2283 * Return 0 on success, negative on failure
2284 **/
2285int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2286			       struct ixgbevf_ring *tx_ring)
2287{
2288	struct pci_dev *pdev = adapter->pdev;
2289	int size;
2290
2291	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2292	tx_ring->tx_buffer_info = vzalloc(size);
2293	if (!tx_ring->tx_buffer_info)
2294		goto err;
2295
2296	/* round up to nearest 4K */
2297	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2298	tx_ring->size = ALIGN(tx_ring->size, 4096);
2299
2300	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2301					   &tx_ring->dma, GFP_KERNEL);
2302	if (!tx_ring->desc)
2303		goto err;
2304
2305	tx_ring->next_to_use = 0;
2306	tx_ring->next_to_clean = 0;
2307	return 0;
2308
2309err:
2310	vfree(tx_ring->tx_buffer_info);
2311	tx_ring->tx_buffer_info = NULL;
2312	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2313	       "descriptor ring\n");
2314	return -ENOMEM;
2315}
2316
2317/**
2318 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2319 * @adapter: board private structure
2320 *
2321 * If this function returns with an error, then it's possible one or
2322 * more of the rings is populated (while the rest are not).  It is the
2323 * caller's duty to clean those orphaned rings.
2324 *
2325 * Return 0 on success, negative on failure
2326 **/
2327static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2328{
2329	int i, err = 0;
2330
2331	for (i = 0; i < adapter->num_tx_queues; i++) {
2332		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2333		if (!err)
2334			continue;
2335		hw_dbg(&adapter->hw,
2336		       "Allocation for Tx Queue %u failed\n", i);
2337		break;
2338	}
2339
2340	return err;
2341}
2342
2343/**
2344 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2345 * @adapter: board private structure
2346 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2347 *
2348 * Returns 0 on success, negative on failure
2349 **/
2350int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2351			       struct ixgbevf_ring *rx_ring)
2352{
2353	struct pci_dev *pdev = adapter->pdev;
2354	int size;
2355
2356	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2357	rx_ring->rx_buffer_info = vzalloc(size);
2358	if (!rx_ring->rx_buffer_info)
2359		goto alloc_failed;
2360
2361	/* Round up to nearest 4K */
2362	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2363	rx_ring->size = ALIGN(rx_ring->size, 4096);
2364
2365	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2366					   &rx_ring->dma, GFP_KERNEL);
2367
2368	if (!rx_ring->desc) {
2369		hw_dbg(&adapter->hw,
2370		       "Unable to allocate memory for "
2371		       "the receive descriptor ring\n");
2372		vfree(rx_ring->rx_buffer_info);
2373		rx_ring->rx_buffer_info = NULL;
2374		goto alloc_failed;
2375	}
2376
2377	rx_ring->next_to_clean = 0;
2378	rx_ring->next_to_use = 0;
2379
2380	return 0;
2381alloc_failed:
2382	return -ENOMEM;
2383}
2384
2385/**
2386 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2387 * @adapter: board private structure
2388 *
2389 * If this function returns with an error, then it's possible one or
2390 * more of the rings is populated (while the rest are not).  It is the
2391 * caller's duty to clean those orphaned rings.
2392 *
2393 * Return 0 on success, negative on failure
2394 **/
2395static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2396{
2397	int i, err = 0;
2398
2399	for (i = 0; i < adapter->num_rx_queues; i++) {
2400		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2401		if (!err)
2402			continue;
2403		hw_dbg(&adapter->hw,
2404		       "Allocation for Rx Queue %u failed\n", i);
2405		break;
2406	}
2407	return err;
2408}
2409
2410/**
2411 * ixgbevf_free_rx_resources - Free Rx Resources
2412 * @adapter: board private structure
2413 * @rx_ring: ring to clean the resources from
2414 *
2415 * Free all receive software resources
2416 **/
2417void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2418			       struct ixgbevf_ring *rx_ring)
2419{
2420	struct pci_dev *pdev = adapter->pdev;
2421
2422	ixgbevf_clean_rx_ring(adapter, rx_ring);
2423
2424	vfree(rx_ring->rx_buffer_info);
2425	rx_ring->rx_buffer_info = NULL;
2426
2427	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2428			  rx_ring->dma);
2429
2430	rx_ring->desc = NULL;
2431}
2432
2433/**
2434 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2435 * @adapter: board private structure
2436 *
2437 * Free all receive software resources
2438 **/
2439static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2440{
2441	int i;
2442
2443	for (i = 0; i < adapter->num_rx_queues; i++)
2444		if (adapter->rx_ring[i].desc)
2445			ixgbevf_free_rx_resources(adapter,
2446						  &adapter->rx_ring[i]);
2447}
2448
2449static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2450{
2451	struct ixgbe_hw *hw = &adapter->hw;
2452	struct ixgbevf_ring *rx_ring;
2453	unsigned int def_q = 0;
2454	unsigned int num_tcs = 0;
2455	unsigned int num_rx_queues = 1;
2456	int err, i;
2457
2458	spin_lock_bh(&adapter->mbx_lock);
2459
2460	/* fetch queue configuration from the PF */
2461	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2462
2463	spin_unlock_bh(&adapter->mbx_lock);
2464
2465	if (err)
2466		return err;
2467
2468	if (num_tcs > 1) {
2469		/* update default Tx ring register index */
2470		adapter->tx_ring[0].reg_idx = def_q;
2471
2472		/* we need as many queues as traffic classes */
2473		num_rx_queues = num_tcs;
2474	}
2475
2476	/* nothing to do if we have the correct number of queues */
2477	if (adapter->num_rx_queues == num_rx_queues)
2478		return 0;
2479
2480	/* allocate new rings */
2481	rx_ring = kcalloc(num_rx_queues,
2482			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
2483	if (!rx_ring)
2484		return -ENOMEM;
2485
2486	/* setup ring fields */
2487	for (i = 0; i < num_rx_queues; i++) {
2488		rx_ring[i].count = adapter->rx_ring_count;
2489		rx_ring[i].queue_index = i;
2490		rx_ring[i].reg_idx = i;
2491		rx_ring[i].dev = &adapter->pdev->dev;
2492		rx_ring[i].netdev = adapter->netdev;
2493	}
2494
2495	/* free the existing Rx ring array and drop the queue count */
2496	adapter->num_rx_queues = 0;
2497	kfree(adapter->rx_ring);
2498
2499	/* move new rings into position on the adapter struct */
2500	adapter->rx_ring = rx_ring;
2501	adapter->num_rx_queues = num_rx_queues;
2502
2503	return 0;
2504}
2505
2506/**
2507 * ixgbevf_open - Called when a network interface is made active
2508 * @netdev: network interface device structure
2509 *
2510 * Returns 0 on success, negative value on failure
2511 *
2512 * The open entry point is called when a network interface is made
2513 * active by the system (IFF_UP).  At this point all resources needed
2514 * for transmit and receive operations are allocated, the interrupt
2515 * handler is registered with the OS, the watchdog timer is started,
2516 * and the stack is notified that the interface is ready.
2517 **/
2518static int ixgbevf_open(struct net_device *netdev)
2519{
2520	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2521	struct ixgbe_hw *hw = &adapter->hw;
2522	int err;
2523
2524	/* disallow open during test */
2525	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2526		return -EBUSY;
2527
2528	if (hw->adapter_stopped) {
2529		ixgbevf_reset(adapter);
2530		/* if adapter is still stopped then PF isn't up and
2531		 * the vf can't start. */
2532		if (hw->adapter_stopped) {
2533			err = IXGBE_ERR_MBX;
2534			pr_err("Unable to start - perhaps the PF Driver isn't "
2535			       "up yet\n");
2536			goto err_setup_reset;
2537		}
2538	}
2539
2540	ixgbevf_negotiate_api(adapter);
2541
2542	/* setup queue reg_idx and Rx queue count */
2543	err = ixgbevf_setup_queues(adapter);
2544	if (err)
2545		goto err_setup_queues;
2546
2547	/* allocate transmit descriptors */
2548	err = ixgbevf_setup_all_tx_resources(adapter);
2549	if (err)
2550		goto err_setup_tx;
2551
2552	/* allocate receive descriptors */
2553	err = ixgbevf_setup_all_rx_resources(adapter);
2554	if (err)
2555		goto err_setup_rx;
2556
2557	ixgbevf_configure(adapter);
2558
2559	/*
2560	 * Map the Tx/Rx rings to the vectors we were allotted.
2561	 * Since request_irq is called later in this function, the rings
2562	 * must be mapped *before* up_complete.
2563	 */
2564	ixgbevf_map_rings_to_vectors(adapter);
2565
2566	ixgbevf_up_complete(adapter);
2567
2568	/* clear any pending interrupts, may auto mask */
2569	IXGBE_READ_REG(hw, IXGBE_VTEICR);
2570	err = ixgbevf_request_irq(adapter);
2571	if (err)
2572		goto err_req_irq;
2573
2574	ixgbevf_irq_enable(adapter);
2575
2576	return 0;
2577
2578err_req_irq:
2579	ixgbevf_down(adapter);
2580	ixgbevf_free_irq(adapter);
2581err_setup_rx:
2582	ixgbevf_free_all_rx_resources(adapter);
2583err_setup_tx:
2584	ixgbevf_free_all_tx_resources(adapter);
2585err_setup_queues:
2586	ixgbevf_reset(adapter);
2587
2588err_setup_reset:
2589
2590	return err;
2591}
2592
2593/**
2594 * ixgbevf_close - Disables a network interface
2595 * @netdev: network interface device structure
2596 *
2597 * Returns 0, this is not allowed to fail
2598 *
2599 * The close entry point is called when an interface is de-activated
2600 * by the OS.  The hardware is still under the driver's control, but
2601 * needs to be disabled.  A global MAC reset is issued to stop the
2602 * hardware, and all transmit and receive resources are freed.
2603 **/
2604static int ixgbevf_close(struct net_device *netdev)
2605{
2606	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2607
2608	ixgbevf_down(adapter);
2609	ixgbevf_free_irq(adapter);
2610
2611	ixgbevf_free_all_tx_resources(adapter);
2612	ixgbevf_free_all_rx_resources(adapter);
2613
2614	return 0;
2615}
2616
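/* Write one advanced context descriptor into the ring.  The data
 * descriptors that follow refer to this context for their offload
 * parameters, so it consumes a ring slot of its own.
 */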
2617static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2618				u32 vlan_macip_lens, u32 type_tucmd,
2619				u32 mss_l4len_idx)
2620{
2621	struct ixgbe_adv_tx_context_desc *context_desc;
2622	u16 i = tx_ring->next_to_use;
2623
2624	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2625
2626	i++;
2627	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2628
2629	/* set bits to identify this as an advanced context descriptor */
2630	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2631
2632	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
2633	context_desc->seqnum_seed	= 0;
2634	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
2635	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
2636}
2637
2638static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2639		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2640{
2641	u32 vlan_macip_lens, type_tucmd;
2642	u32 mss_l4len_idx, l4len;
2643
2644	if (!skb_is_gso(skb))
2645		return 0;
2646
2647	if (skb_header_cloned(skb)) {
2648		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2649		if (err)
2650			return err;
2651	}
2652
2653	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2654	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2655
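	/* For TSO the hardware rewrites the IP length fields and the TCP
	 * checksum on every segment, so zero the totals here and seed
	 * tcp->check with just the pseudo-header sum (zero length/csum).
	 */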
2656	if (skb->protocol == htons(ETH_P_IP)) {
2657		struct iphdr *iph = ip_hdr(skb);
2658		iph->tot_len = 0;
2659		iph->check = 0;
2660		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2661							 iph->daddr, 0,
2662							 IPPROTO_TCP,
2663							 0);
2664		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2665	} else if (skb_is_gso_v6(skb)) {
2666		ipv6_hdr(skb)->payload_len = 0;
2667		tcp_hdr(skb)->check =
2668		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2669				     &ipv6_hdr(skb)->daddr,
2670				     0, IPPROTO_TCP, 0);
2671	}
2672
2673	/* compute header lengths */
2674	l4len = tcp_hdrlen(skb);
2676	*hdr_len = skb_transport_offset(skb) + l4len;
2677
2678	/* mss_l4len_id: use 1 as index for TSO */
2679	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2680	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2681	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2682
2683	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2684	vlan_macip_lens = skb_network_header_len(skb);
2685	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2686	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2687
2688	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2689			    type_tucmd, mss_l4len_idx);
2690
2691	return 1;
2692}
2693
2694static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2695			    struct sk_buff *skb, u32 tx_flags)
2696{
2697	u32 vlan_macip_lens = 0;
2698	u32 mss_l4len_idx = 0;
2699	u32 type_tucmd = 0;
2700
2701	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2702		u8 l4_hdr = 0;
2703		switch (skb->protocol) {
2704		case __constant_htons(ETH_P_IP):
2705			vlan_macip_lens |= skb_network_header_len(skb);
2706			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2707			l4_hdr = ip_hdr(skb)->protocol;
2708			break;
2709		case __constant_htons(ETH_P_IPV6):
2710			vlan_macip_lens |= skb_network_header_len(skb);
2711			l4_hdr = ipv6_hdr(skb)->nexthdr;
2712			break;
2713		default:
2714			if (unlikely(net_ratelimit())) {
2715				dev_warn(tx_ring->dev,
2716				 "partial checksum but proto=%x!\n",
2717				 skb->protocol);
2718			}
2719			break;
2720		}
2721
2722		switch (l4_hdr) {
2723		case IPPROTO_TCP:
2724			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2725			mss_l4len_idx = tcp_hdrlen(skb) <<
2726					IXGBE_ADVTXD_L4LEN_SHIFT;
2727			break;
2728		case IPPROTO_SCTP:
2729			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2730			mss_l4len_idx = sizeof(struct sctphdr) <<
2731					IXGBE_ADVTXD_L4LEN_SHIFT;
2732			break;
2733		case IPPROTO_UDP:
2734			mss_l4len_idx = sizeof(struct udphdr) <<
2735					IXGBE_ADVTXD_L4LEN_SHIFT;
2736			break;
2737		default:
2738			if (unlikely(net_ratelimit())) {
2739				dev_warn(tx_ring->dev,
2740				 "partial checksum but l4 proto=%x!\n",
2741				 l4_hdr);
2742			}
2743			break;
2744		}
2745	}
2746
2747	/* vlan_macip_lens: MACLEN, VLAN tag */
2748	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2749	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2750
2751	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2752			    type_tucmd, mss_l4len_idx);
2753
2754	return (skb->ip_summed == CHECKSUM_PARTIAL);
2755}
2756
2757static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2758			  struct sk_buff *skb, u32 tx_flags,
2759			  unsigned int first)
2760{
2761	struct ixgbevf_tx_buffer *tx_buffer_info;
2762	unsigned int len;
2763	unsigned int total = skb->len;
2764	unsigned int offset = 0, size;
2765	int count = 0;
2766	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2767	unsigned int f;
2768	int i;
2769
2770	i = tx_ring->next_to_use;
2771
2772	len = min(skb_headlen(skb), total);
2773	while (len) {
2774		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2775		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2776
2777		tx_buffer_info->length = size;
2778		tx_buffer_info->mapped_as_page = false;
2779		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2780						     skb->data + offset,
2781						     size, DMA_TO_DEVICE);
2782		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2783			goto dma_error;
2784		tx_buffer_info->next_to_watch = i;
2785
2786		len -= size;
2787		total -= size;
2788		offset += size;
2789		count++;
2790		i++;
2791		if (i == tx_ring->count)
2792			i = 0;
2793	}
2794
2795	for (f = 0; f < nr_frags; f++) {
2796		const struct skb_frag_struct *frag;
2797
2798		frag = &skb_shinfo(skb)->frags[f];
2799		len = min((unsigned int)skb_frag_size(frag), total);
2800		offset = 0;
2801
2802		while (len) {
2803			tx_buffer_info = &tx_ring->tx_buffer_info[i];
2804			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2805
2806			tx_buffer_info->length = size;
2807			tx_buffer_info->dma =
2808				skb_frag_dma_map(tx_ring->dev, frag,
2809						 offset, size, DMA_TO_DEVICE);
2810			if (dma_mapping_error(tx_ring->dev,
2811					      tx_buffer_info->dma))
2812				goto dma_error;
2813			tx_buffer_info->mapped_as_page = true;
2814			tx_buffer_info->next_to_watch = i;
2815
2816			len -= size;
2817			total -= size;
2818			offset += size;
2819			count++;
2820			i++;
2821			if (i == tx_ring->count)
2822				i = 0;
2823		}
2824		if (total == 0)
2825			break;
2826	}
2827
2828	if (i == 0)
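	/* step back to the last descriptor actually written so the skb
	 * and next_to_watch are recorded on the packet's final buffer */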
2829		i = tx_ring->count - 1;
2830	else
2831		i = i - 1;
2832	tx_ring->tx_buffer_info[i].skb = skb;
2833	tx_ring->tx_buffer_info[first].next_to_watch = i;
2834	tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2835
2836	return count;
2837
2838dma_error:
2839	dev_err(tx_ring->dev, "TX DMA map failed\n");
2840
2841	/* clear timestamp and dma mappings for failed tx_buffer_info map */
2842	tx_buffer_info->dma = 0;
2843	tx_buffer_info->next_to_watch = 0;
2844	count--;
2845
2846	/* clear timestamp and dma mappings for remaining portion of packet */
2847	while (count >= 0) {
2848		count--;
2849		i--;
2850		if (i < 0)
2851			i += tx_ring->count;
2852		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2853		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2854	}
2855
2856	return count;
2857}
2858
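/* Write the data descriptors for a fully mapped packet.  EOP/RS are
 * OR'd into the last descriptor only, so the hardware reports one
 * writeback per packet rather than one per buffer.
 */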
2859static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2860			     int count, u32 paylen, u8 hdr_len)
2861{
2862	union ixgbe_adv_tx_desc *tx_desc = NULL;
2863	struct ixgbevf_tx_buffer *tx_buffer_info;
2864	u32 olinfo_status = 0, cmd_type_len = 0;
2865	unsigned int i;
2866
2867	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2868
2869	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2870
2871	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2872
2873	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2874		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2875
2876	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2877		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2878
2879	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2880		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2881
2882		/* use index 1 context for tso */
2883		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2884		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2885			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2886	}
2887
2888	/*
2889	 * The Check Context (CC) bit must be set if the Tx switch is
2890	 * enabled, which it always is when virtual functions are running.
2891	 */
2892	olinfo_status |= IXGBE_ADVTXD_CC;
2893
2894	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2895
2896	i = tx_ring->next_to_use;
2897	while (count--) {
2898		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2899		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2900		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2901		tx_desc->read.cmd_type_len =
2902			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2903		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2904		i++;
2905		if (i == tx_ring->count)
2906			i = 0;
2907	}
2908
2909	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2910
2911	tx_ring->next_to_use = i;
2912}
2913
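/* Out of descriptors: stop the subqueue, then re-check under the memory
 * barrier in case a Tx completion on another CPU just freed room, and
 * restart immediately instead of waiting for the next interrupt.
 */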
2914static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
2915{
2916	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
2917
2918	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2919	/* Herbert's original patch had:
2920	 *  smp_mb__after_netif_stop_queue();
2921	 * but since that doesn't exist yet, just open code it. */
2922	smp_mb();
2923
2924	/* We need to check again in case another CPU has just
2925	 * made room available. */
2926	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
2927		return -EBUSY;
2928
2929	/* A reprieve! - use start_queue because it doesn't call schedule */
2930	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2931	++adapter->restart_queue;
2932	return 0;
2933}
2934
2935static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
2936{
2937	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
2938		return 0;
2939	return __ixgbevf_maybe_stop_tx(tx_ring, size);
2940}
2941
2942static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2943{
2944	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2945	struct ixgbevf_ring *tx_ring;
2946	unsigned int first;
2947	unsigned int tx_flags = 0;
2948	u8 hdr_len = 0;
2949	int r_idx = 0, tso;
2950	u16 count = TXD_USE_COUNT(skb_headlen(skb));
2951#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2952	unsigned short f;
2953#endif
2954	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
2955	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
2956		dev_kfree_skb(skb);
2957		return NETDEV_TX_OK;
2958	}
2959
2960	tx_ring = &adapter->tx_ring[r_idx];
2961
2962	/*
2963	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
2964	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
2965	 *       + 2 desc gap to keep tail from touching head,
2966	 *       + 1 desc for context descriptor,
2967	 * otherwise try next time
2968	 */
2969#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2970	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2971		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2972#else
2973	count += skb_shinfo(skb)->nr_frags;
2974#endif
2975	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
2976		adapter->tx_busy++;
2977		return NETDEV_TX_BUSY;
2978	}
2979
2980	if (vlan_tx_tag_present(skb)) {
2981		tx_flags |= vlan_tx_tag_get(skb);
2982		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
2983		tx_flags |= IXGBE_TX_FLAGS_VLAN;
2984	}
2985
2986	first = tx_ring->next_to_use;
2987
2988	if (skb->protocol == htons(ETH_P_IP))
2989		tx_flags |= IXGBE_TX_FLAGS_IPV4;
2990	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
2991	if (tso < 0) {
2992		dev_kfree_skb_any(skb);
2993		return NETDEV_TX_OK;
2994	}
2995
2996	if (tso)
2997		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
2998	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
2999		tx_flags |= IXGBE_TX_FLAGS_CSUM;
3000
3001	ixgbevf_tx_queue(tx_ring, tx_flags,
3002			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
3003			 skb->len, hdr_len);
3004	/*
3005	 * Force memory writes to complete before letting h/w
3006	 * know there are new descriptors to fetch.  (Only
3007	 * applicable for weak-ordered memory model archs,
3008	 * such as IA-64).
3009	 */
3010	wmb();
3011
3012	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3013
3014	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3015
3016	return NETDEV_TX_OK;
3017}
3018
3019/**
3020 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3021 * @netdev: network interface device structure
3022 * @p: pointer to an address structure
3023 *
3024 * Returns 0 on success, negative on failure
3025 **/
3026static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3027{
3028	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3029	struct ixgbe_hw *hw = &adapter->hw;
3030	struct sockaddr *addr = p;
3031
3032	if (!is_valid_ether_addr(addr->sa_data))
3033		return -EADDRNOTAVAIL;
3034
3035	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3036	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3037
3038	spin_lock_bh(&adapter->mbx_lock);
3039
3040	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3041
3042	spin_unlock_bh(&adapter->mbx_lock);
3043
3044	return 0;
3045}
3046
3047/**
3048 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3049 * @netdev: network interface device structure
3050 * @new_mtu: new value for maximum frame size
3051 *
3052 * Returns 0 on success, negative on failure
3053 **/
3054static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3055{
3056	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3057	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3058	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
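	/* e.g. the default MTU of 1500 gives 1500 + 14 + 4 = 1518 bytes */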
3059
3060	switch (adapter->hw.api_version) {
3061	case ixgbe_mbox_api_11:
3062		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3063		break;
3064	default:
3065		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3066			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3067		break;
3068	}
3069
3070	/* MTU < 68 is an error and causes problems on some kernels */
3071	if ((new_mtu < 68) || (max_frame > max_possible_frame))
3072		return -EINVAL;
3073
3074	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3075	       netdev->mtu, new_mtu);
3076	/* must set new MTU before calling down or up */
3077	netdev->mtu = new_mtu;
3078
3079	if (netif_running(netdev))
3080		ixgbevf_reinit_locked(adapter);
3081
3082	return 0;
3083}
3084
3085static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3086{
3087	struct net_device *netdev = pci_get_drvdata(pdev);
3088	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3089#ifdef CONFIG_PM
3090	int retval = 0;
3091#endif
3092
3093	netif_device_detach(netdev);
3094
3095	if (netif_running(netdev)) {
3096		rtnl_lock();
3097		ixgbevf_down(adapter);
3098		ixgbevf_free_irq(adapter);
3099		ixgbevf_free_all_tx_resources(adapter);
3100		ixgbevf_free_all_rx_resources(adapter);
3101		rtnl_unlock();
3102	}
3103
3104	ixgbevf_clear_interrupt_scheme(adapter);
3105
3106#ifdef CONFIG_PM
3107	retval = pci_save_state(pdev);
3108	if (retval)
3109		return retval;
3110
3111#endif
3112	pci_disable_device(pdev);
3113
3114	return 0;
3115}
3116
3117#ifdef CONFIG_PM
3118static int ixgbevf_resume(struct pci_dev *pdev)
3119{
3120	struct net_device *netdev = pci_get_drvdata(pdev);
3121	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3122	int err;
3123
3124	pci_set_power_state(pdev, PCI_D0);
3125	pci_restore_state(pdev);
3126	/*
3127	 * pci_restore_state clears dev->state_saved so call
3128	 * pci_save_state to restore it.
3129	 */
3130	pci_save_state(pdev);
3131
3132	err = pci_enable_device_mem(pdev);
3133	if (err) {
3134		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3135		return err;
3136	}
3137	pci_set_master(pdev);
3138
3139	rtnl_lock();
3140	err = ixgbevf_init_interrupt_scheme(adapter);
3141	rtnl_unlock();
3142	if (err) {
3143		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3144		return err;
3145	}
3146
3147	ixgbevf_reset(adapter);
3148
3149	if (netif_running(netdev)) {
3150		err = ixgbevf_open(netdev);
3151		if (err)
3152			return err;
3153	}
3154
3155	netif_device_attach(netdev);
3156
3157	return err;
3158}
3159
3160#endif /* CONFIG_PM */
3161static void ixgbevf_shutdown(struct pci_dev *pdev)
3162{
3163	ixgbevf_suspend(pdev, PMSG_SUSPEND);
3164}
3165
3166static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3167						struct rtnl_link_stats64 *stats)
3168{
3169	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3170	unsigned int start;
3171	u64 bytes, packets;
3172	const struct ixgbevf_ring *ring;
3173	int i;
3174
3175	ixgbevf_update_stats(adapter);
3176
3177	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3178
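	/* per-ring 64-bit counters are read under a seqcount so 32-bit
	 * readers retry if a writer updated the ring stats mid-read */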
3179	for (i = 0; i < adapter->num_rx_queues; i++) {
3180		ring = &adapter->rx_ring[i];
3181		do {
3182			start = u64_stats_fetch_begin_bh(&ring->syncp);
3183			bytes = ring->total_bytes;
3184			packets = ring->total_packets;
3185		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3186		stats->rx_bytes += bytes;
3187		stats->rx_packets += packets;
3188	}
3189
3190	for (i = 0; i < adapter->num_tx_queues; i++) {
3191		ring = &adapter->tx_ring[i];
3192		do {
3193			start = u64_stats_fetch_begin_bh(&ring->syncp);
3194			bytes = ring->total_bytes;
3195			packets = ring->total_packets;
3196		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3197		stats->tx_bytes += bytes;
3198		stats->tx_packets += packets;
3199	}
3200
3201	return stats;
3202}
3203
3204static const struct net_device_ops ixgbevf_netdev_ops = {
3205	.ndo_open		= ixgbevf_open,
3206	.ndo_stop		= ixgbevf_close,
3207	.ndo_start_xmit		= ixgbevf_xmit_frame,
3208	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
3209	.ndo_get_stats64	= ixgbevf_get_stats,
3210	.ndo_validate_addr	= eth_validate_addr,
3211	.ndo_set_mac_address	= ixgbevf_set_mac,
3212	.ndo_change_mtu		= ixgbevf_change_mtu,
3213	.ndo_tx_timeout		= ixgbevf_tx_timeout,
3214	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
3215	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
3216};
3217
3218static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3219{
3220	dev->netdev_ops = &ixgbevf_netdev_ops;
3221	ixgbevf_set_ethtool_ops(dev);
3222	dev->watchdog_timeo = 5 * HZ;
3223}
3224
3225/**
3226 * ixgbevf_probe - Device Initialization Routine
3227 * @pdev: PCI device information struct
3228 * @ent: entry in ixgbevf_pci_tbl
3229 *
3230 * Returns 0 on success, negative on failure
3231 *
3232 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3233 * The OS initialization, configuring of the adapter private structure,
3234 * and a hardware reset occur.
3235 **/
3236static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3237				   const struct pci_device_id *ent)
3238{
3239	struct net_device *netdev;
3240	struct ixgbevf_adapter *adapter = NULL;
3241	struct ixgbe_hw *hw = NULL;
3242	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3243	static int cards_found;
3244	int err, pci_using_dac;
3245
3246	err = pci_enable_device(pdev);
3247	if (err)
3248		return err;
3249
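	/* prefer 64-bit DMA, falling back to 32-bit masks; pci_using_dac
	 * later decides whether NETIF_F_HIGHDMA can be advertised */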
3250	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3251	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3252		pci_using_dac = 1;
3253	} else {
3254		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3255		if (err) {
3256			err = dma_set_coherent_mask(&pdev->dev,
3257						    DMA_BIT_MASK(32));
3258			if (err) {
3259				dev_err(&pdev->dev, "No usable DMA "
3260					"configuration, aborting\n");
3261				goto err_dma;
3262			}
3263		}
3264		pci_using_dac = 0;
3265	}
3266
3267	err = pci_request_regions(pdev, ixgbevf_driver_name);
3268	if (err) {
3269		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3270		goto err_pci_reg;
3271	}
3272
3273	pci_set_master(pdev);
3274
3275	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3276				   MAX_TX_QUEUES);
3277	if (!netdev) {
3278		err = -ENOMEM;
3279		goto err_alloc_etherdev;
3280	}
3281
3282	SET_NETDEV_DEV(netdev, &pdev->dev);
3283
3284	pci_set_drvdata(pdev, netdev);
3285	adapter = netdev_priv(netdev);
3286
3287	adapter->netdev = netdev;
3288	adapter->pdev = pdev;
3289	hw = &adapter->hw;
3290	hw->back = adapter;
3291	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3292
3293	/*
3294	 * Call pci_save_state here in the standalone driver because it
3295	 * relies on the adapter struct existing and needs netdev_priv.
3296	 */
3297	pci_save_state(pdev);
3298
3299	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3300			      pci_resource_len(pdev, 0));
3301	if (!hw->hw_addr) {
3302		err = -EIO;
3303		goto err_ioremap;
3304	}
3305
3306	ixgbevf_assign_netdev_ops(netdev);
3307
3308	adapter->bd_number = cards_found;
3309
3310	/* Setup hw api */
3311	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3312	hw->mac.type  = ii->mac;
3313
3314	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3315	       sizeof(struct ixgbe_mbx_operations));
3316
3317	/* setup the private structure */
3318	err = ixgbevf_sw_init(adapter);
3319	if (err)
3320		goto err_sw_init;
3321
3322	/* The HW MAC address was set and/or determined in sw_init */
3323	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3324
3325	if (!is_valid_ether_addr(netdev->dev_addr)) {
3326		pr_err("invalid MAC address\n");
3327		err = -EIO;
3328		goto err_sw_init;
3329	}
3330
3331	netdev->hw_features = NETIF_F_SG |
3332			   NETIF_F_IP_CSUM |
3333			   NETIF_F_IPV6_CSUM |
3334			   NETIF_F_TSO |
3335			   NETIF_F_TSO6 |
3336			   NETIF_F_RXCSUM;
3337
3338	netdev->features = netdev->hw_features |
3339			   NETIF_F_HW_VLAN_TX |
3340			   NETIF_F_HW_VLAN_RX |
3341			   NETIF_F_HW_VLAN_FILTER;
3342
3343	netdev->vlan_features |= NETIF_F_TSO;
3344	netdev->vlan_features |= NETIF_F_TSO6;
3345	netdev->vlan_features |= NETIF_F_IP_CSUM;
3346	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3347	netdev->vlan_features |= NETIF_F_SG;
3348
3349	if (pci_using_dac)
3350		netdev->features |= NETIF_F_HIGHDMA;
3351
3352	netdev->priv_flags |= IFF_UNICAST_FLT;
3353
3354	init_timer(&adapter->watchdog_timer);
3355	adapter->watchdog_timer.function = ixgbevf_watchdog;
3356	adapter->watchdog_timer.data = (unsigned long)adapter;
3357
3358	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3359	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3360
3361	err = ixgbevf_init_interrupt_scheme(adapter);
3362	if (err)
3363		goto err_sw_init;
3364
3365	strcpy(netdev->name, "eth%d");
3366
3367	err = register_netdev(netdev);
3368	if (err)
3369		goto err_register;
3370
3371	netif_carrier_off(netdev);
3372
3373	ixgbevf_init_last_counter_stats(adapter);
3374
3375	/* print the MAC address */
3376	hw_dbg(hw, "%pM\n", netdev->dev_addr);
3377
3378	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3379
3380	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3381	cards_found++;
3382	return 0;
3383
3384err_register:
3385	ixgbevf_clear_interrupt_scheme(adapter);
3386err_sw_init:
3387	ixgbevf_reset_interrupt_capability(adapter);
3388	iounmap(hw->hw_addr);
3389err_ioremap:
3390	free_netdev(netdev);
3391err_alloc_etherdev:
3392	pci_release_regions(pdev);
3393err_pci_reg:
3394err_dma:
3395	pci_disable_device(pdev);
3396	return err;
3397}
3398
3399/**
3400 * ixgbevf_remove - Device Removal Routine
3401 * @pdev: PCI device information struct
3402 *
3403 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3404 * that it should release a PCI device.  This could be caused by a
3405 * Hot-Plug event, or because the driver is going to be removed from
3406 * memory.
3407 **/
3408static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3409{
3410	struct net_device *netdev = pci_get_drvdata(pdev);
3411	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3412
3413	set_bit(__IXGBEVF_DOWN, &adapter->state);
3414
3415	del_timer_sync(&adapter->watchdog_timer);
3416
3417	cancel_work_sync(&adapter->reset_task);
3418	cancel_work_sync(&adapter->watchdog_task);
3419
3420	if (netdev->reg_state == NETREG_REGISTERED)
3421		unregister_netdev(netdev);
3422
3423	ixgbevf_clear_interrupt_scheme(adapter);
3424	ixgbevf_reset_interrupt_capability(adapter);
3425
3426	iounmap(adapter->hw.hw_addr);
3427	pci_release_regions(pdev);
3428
3429	hw_dbg(&adapter->hw, "Remove complete\n");
3430
3431	kfree(adapter->tx_ring);
3432	kfree(adapter->rx_ring);
3433
3434	free_netdev(netdev);
3435
3436	pci_disable_device(pdev);
3437}
3438
3439/**
3440 * ixgbevf_io_error_detected - called when PCI error is detected
3441 * @pdev: Pointer to PCI device
3442 * @state: The current pci connection state
3443 *
3444 * This function is called after a PCI bus error affecting
3445 * this device has been detected.
3446 */
3447static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3448						  pci_channel_state_t state)
3449{
3450	struct net_device *netdev = pci_get_drvdata(pdev);
3451	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3452
3453	netif_device_detach(netdev);
3454
3455	if (state == pci_channel_io_perm_failure)
3456		return PCI_ERS_RESULT_DISCONNECT;
3457
3458	if (netif_running(netdev))
3459		ixgbevf_down(adapter);
3460
3461	pci_disable_device(pdev);
3462
3463	/* Request a slot reset. */
3464	return PCI_ERS_RESULT_NEED_RESET;
3465}
3466
3467/**
3468 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3469 * @pdev: Pointer to PCI device
3470 *
3471 * Restart the card from scratch, as if from a cold-boot. Implementation
3472 * resembles the first half of the ixgbevf_resume routine.
3473 */
3474static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3475{
3476	struct net_device *netdev = pci_get_drvdata(pdev);
3477	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3478
3479	if (pci_enable_device_mem(pdev)) {
3480		dev_err(&pdev->dev,
3481			"Cannot re-enable PCI device after reset.\n");
3482		return PCI_ERS_RESULT_DISCONNECT;
3483	}
3484
3485	pci_set_master(pdev);
3486
3487	ixgbevf_reset(adapter);
3488
3489	return PCI_ERS_RESULT_RECOVERED;
3490}
3491
3492/**
3493 * ixgbevf_io_resume - called when traffic can start flowing again.
3494 * @pdev: Pointer to PCI device
3495 *
3496 * This callback is called when the error recovery driver tells us that
3497 * it's OK to resume normal operation.  Implementation resembles the
3498 * second half of the ixgbevf_resume routine.
3499 */
3500static void ixgbevf_io_resume(struct pci_dev *pdev)
3501{
3502	struct net_device *netdev = pci_get_drvdata(pdev);
3503	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3504
3505	if (netif_running(netdev))
3506		ixgbevf_up(adapter);
3507
3508	netif_device_attach(netdev);
3509}
3510
3511/* PCI Error Recovery (ERS) */
3512static const struct pci_error_handlers ixgbevf_err_handler = {
3513	.error_detected = ixgbevf_io_error_detected,
3514	.slot_reset = ixgbevf_io_slot_reset,
3515	.resume = ixgbevf_io_resume,
3516};
3517
3518static struct pci_driver ixgbevf_driver = {
3519	.name     = ixgbevf_driver_name,
3520	.id_table = ixgbevf_pci_tbl,
3521	.probe    = ixgbevf_probe,
3522	.remove   = __devexit_p(ixgbevf_remove),
3523#ifdef CONFIG_PM
3524	/* Power Management Hooks */
3525	.suspend  = ixgbevf_suspend,
3526	.resume   = ixgbevf_resume,
3527#endif
3528	.shutdown = ixgbevf_shutdown,
3529	.err_handler = &ixgbevf_err_handler
3530};
3531
3532/**
3533 * ixgbevf_init_module - Driver Registration Routine
3534 *
3535 * ixgbevf_init_module is the first routine called when the driver is
3536 * loaded. All it does is register with the PCI subsystem.
3537 **/
3538static int __init ixgbevf_init_module(void)
3539{
3540	int ret;
3541	pr_info("%s - version %s\n", ixgbevf_driver_string,
3542		ixgbevf_driver_version);
3543
3544	pr_info("%s\n", ixgbevf_copyright);
3545
3546	ret = pci_register_driver(&ixgbevf_driver);
3547	return ret;
3548}
3549
3550module_init(ixgbevf_init_module);
3551
3552/**
3553 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3554 *
3555 * ixgbevf_exit_module is called just before the driver is removed
3556 * from memory.
3557 **/
3558static void __exit ixgbevf_exit_module(void)
3559{
3560	pci_unregister_driver(&ixgbevf_driver);
3561}
3562
3563#ifdef DEBUG
3564/**
3565 * ixgbevf_get_hw_dev_name - return device name string
3566 * used by hardware layer to print debugging information
3567 **/
3568char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3569{
3570	struct ixgbevf_adapter *adapter = hw->back;
3571	return adapter->netdev->name;
3572}
3573
3574#endif
3575module_exit(ixgbevf_exit_module);
3576
3577/* ixgbevf_main.c */
3578