ixgbevf_main.c revision 46acc460c07b5c74287560a00b6cbc6111136ab6
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
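		/* each VTIVAR register packs four 8-bit entries (Rx and
		 * Tx causes for a pair of queues): queue >> 1 selects the
		 * register, and index below selects the byte within it
		 */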
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
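/* one descriptor per fragment, with the "+ 4" as a conservative allowance
 * for the skb head, a context descriptor, and rounding
 */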
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
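				/* for TSO each segment re-sends the headers,
				 * so count (segs - 1) extra header copies on
				 * top of skb->len
				 */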
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

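/* wake the queue only once enough descriptors for two worst-case frames
 * are free again
 */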
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: ring on which the packet was received
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers; if we did, drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
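			/* 14 == ETH_HLEN; the linear area should hold at
			 * least a complete Ethernet header
			 */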
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    !(compare_ether_addr(adapter->netdev->dev_addr,
					eth_hdr(skb)->h_source))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

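	/* after the loop v_idx == q_vectors: map the mailbox/link (other
	 * causes) interrupt to that last, non-queue vector
	 */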
	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
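	/* q_vector->itr is stored in register units; shifting right by two
	 * yields microseconds (e.g. IXGBE_100K_ITR == 40 -> 10 usecs, i.e.
	 * 100000 ints/s)
	 */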
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
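		/* equivalent to averaging interrupt *rates* (1/itr) with a
		 * 9:1 weighting toward the old value, so the interval moves
		 * gradually instead of jumping
		 */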
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
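	/* BSIZEPKT is programmed in 1 KB units, hence the 1K alignment
	 * before shifting into the field
	 */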
	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Make best use of allocation by using all but 1K of a
	 * power of 2 allocation that will be used for skb->head.
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_3K)
		rx_buf_len = IXGBEVF_RXBUFFER_3K;
	else if (max_frame <= IXGBEVF_RXBUFFER_7K)
		rx_buf_len = IXGBEVF_RXBUFFER_7K;
	else if (max_frame <= IXGBEVF_RXBUFFER_15K)
		rx_buf_len = IXGBEVF_RXBUFFER_15K;
	else
		rx_buf_len = IXGBEVF_MAX_RXBUFFER;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	if (!hw->mac.ops.set_vfta)
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if (netdev_uc_count(netdev) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
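		/* the WTHRESH field starts at bit 16 of TXDCTL, hence the
		 * shift
		 */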
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;

		/* allocate resources on the ring */
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
			}
			kfree(rx_ring);
			return err;
		}
	}

	/* free the existing rings and queues */
	ixgbevf_free_all_rx_resources(adapter);
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	/* reset ring to vector mapping */
	ixgbevf_reset_q_vectors(adapter);
	ixgbevf_map_rings_to_vectors(adapter);

	return 0;
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_reset_queues(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	spin_lock_bh(&adapter->mbx_lock);

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	spin_unlock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

1848	/*
1849	 * It's easy to be greedy for MSI-X vectors, but it really
1850	 * doesn't do us much good if we have a lot more vectors
1851	 * than CPUs.  So let's be conservative and only ask for
1852	 * (roughly) the same number of vectors as there are CPUs.
1853	 * The default is to use pairs of vectors.
1854	 */
1855	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1856	v_budget = min_t(int, v_budget, num_online_cpus());
1857	v_budget += NON_Q_VECTORS;
1858
1859	/* A failure in MSI-X entry allocation isn't fatal, but it does
1860	 * mean we disable MSI-X capabilities of the adapter. */
1861	adapter->msix_entries = kcalloc(v_budget,
1862					sizeof(struct msix_entry), GFP_KERNEL);
1863	if (!adapter->msix_entries) {
1864		err = -ENOMEM;
1865		goto out;
1866	}
1867
1868	for (vector = 0; vector < v_budget; vector++)
1869		adapter->msix_entries[vector].entry = vector;
1870
1871	ixgbevf_acquire_msix_vectors(adapter, v_budget);
1872
1873	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1874	if (err)
1875		goto out;
1876
1877	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1878
1879out:
1880	return err;
1881}
1882
1883/**
1884 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1885 * @adapter: board private structure to initialize
1886 *
1887 * We allocate one q_vector per queue interrupt.  If allocation fails we
1888 * return -ENOMEM.
1889 **/
1890static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1891{
1892	int q_idx, num_q_vectors;
1893	struct ixgbevf_q_vector *q_vector;
1894
1895	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1896
1897	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1898		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1899		if (!q_vector)
1900			goto err_out;
1901		q_vector->adapter = adapter;
1902		q_vector->v_idx = q_idx;
1903		netif_napi_add(adapter->netdev, &q_vector->napi,
1904			       ixgbevf_poll, 64);
1905		adapter->q_vector[q_idx] = q_vector;
1906	}
1907
1908	return 0;
1909
1910err_out:
1911	while (q_idx) {
1912		q_idx--;
1913		q_vector = adapter->q_vector[q_idx];
1914		netif_napi_del(&q_vector->napi);
1915		kfree(q_vector);
1916		adapter->q_vector[q_idx] = NULL;
1917	}
1918	return -ENOMEM;
1919}
1920
1921/**
1922 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1923 * @adapter: board private structure to initialize
1924 *
1925 * This function frees the memory allocated to the q_vectors.  In addition if
1926 * NAPI is enabled it will delete any references to the NAPI struct prior
1927 * to freeing the q_vector.
1928 **/
1929static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1930{
1931	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1932
1933	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1934		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1935
1936		adapter->q_vector[q_idx] = NULL;
1937		netif_napi_del(&q_vector->napi);
1938		kfree(q_vector);
1939	}
1940}
1941
1942/**
1943 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1944 * @adapter: board private structure
1945 *
1946 **/
1947static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1948{
1949	pci_disable_msix(adapter->pdev);
1950	kfree(adapter->msix_entries);
1951	adapter->msix_entries = NULL;
1952}
1953
1954/**
1955 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1956 * @adapter: board private structure to initialize
1957 *
1958 **/
1959static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1960{
1961	int err;
1962
1963	/* Number of supported queues */
1964	ixgbevf_set_num_queues(adapter);
1965
1966	err = ixgbevf_set_interrupt_capability(adapter);
1967	if (err) {
1968		hw_dbg(&adapter->hw,
1969		       "Unable to setup interrupt capabilities\n");
1970		goto err_set_interrupt;
1971	}
1972
1973	err = ixgbevf_alloc_q_vectors(adapter);
1974	if (err) {
1975		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1976		       "vectors\n");
1977		goto err_alloc_q_vectors;
1978	}
1979
1980	err = ixgbevf_alloc_queues(adapter);
1981	if (err) {
1982		pr_err("Unable to allocate memory for queues\n");
1983		goto err_alloc_queues;
1984	}
1985
1986	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
1987	       "Tx Queue count = %u\n",
1988	       (adapter->num_rx_queues > 1) ? "Enabled" :
1989	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
1990
1991	set_bit(__IXGBEVF_DOWN, &adapter->state);
1992
1993	return 0;
1994err_alloc_queues:
1995	ixgbevf_free_q_vectors(adapter);
1996err_alloc_q_vectors:
1997	ixgbevf_reset_interrupt_capability(adapter);
1998err_set_interrupt:
1999	return err;
2000}
2001
2002/**
2003 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2004 * @adapter: board private structure to clear interrupt scheme on
2005 *
2006 * We go through and clear interrupt specific resources and reset the structure
2007 * to pre-load conditions
2008 **/
2009static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2010{
2011	adapter->num_tx_queues = 0;
2012	adapter->num_rx_queues = 0;
2013
2014	ixgbevf_free_q_vectors(adapter);
2015	ixgbevf_reset_interrupt_capability(adapter);
2016}
2017
2018/**
2019 * ixgbevf_sw_init - Initialize general software structures
2020 * (struct ixgbevf_adapter)
2021 * @adapter: board private structure to initialize
2022 *
2023 * ixgbevf_sw_init initializes the Adapter private data structure.
2024 * Fields are initialized based on PCI device information and
2025 * OS network device settings (MTU size).
2026 **/
2027static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2028{
2029	struct ixgbe_hw *hw = &adapter->hw;
2030	struct pci_dev *pdev = adapter->pdev;
2031	int err;
2032
2033	/* PCI config space info */
2034
2035	hw->vendor_id = pdev->vendor;
2036	hw->device_id = pdev->device;
2037	hw->revision_id = pdev->revision;
2038	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2039	hw->subsystem_device_id = pdev->subsystem_device;
2040
2041	hw->mbx.ops.init_params(hw);
2042
2043	/* assume legacy case in which PF would only give VF 2 queues */
2044	hw->mac.max_tx_queues = 2;
2045	hw->mac.max_rx_queues = 2;
2046
2047	err = hw->mac.ops.reset_hw(hw);
2048	if (err) {
2049		dev_info(&pdev->dev,
2050		         "PF still in reset state, assigning new address\n");
2051		eth_hw_addr_random(adapter->netdev);
2052		memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2053			adapter->netdev->addr_len);
2054	} else {
2055		err = hw->mac.ops.init_hw(hw);
2056		if (err) {
2057			pr_err("init_hw failed: %d\n", err);
2058			goto out;
2059		}
2060		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
2061			adapter->netdev->addr_len);
2062	}
2063
2064	/* lock to protect mailbox accesses */
2065	spin_lock_init(&adapter->mbx_lock);
2066
2067	/* Enable dynamic interrupt throttling rates */
2068	adapter->rx_itr_setting = 1;
2069	adapter->tx_itr_setting = 1;
2070
2071	/* set default ring sizes */
2072	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2073	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2074
2075	set_bit(__IXGBEVF_DOWN, &adapter->state);
2076	return 0;
2077
2078out:
2079	return err;
2080}
2081
2082#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
2083	{							\
2084		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
2085		if (current_counter < last_counter)		\
2086			counter += 0x100000000LL;		\
2087		last_counter = current_counter;			\
2088		counter &= 0xFFFFFFFF00000000LL;		\
2089		counter |= current_counter;			\
2090	}
2091
2092#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2093	{								 \
2094		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
2095		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
2096		u64 current_counter = (current_counter_msb << 32) |      \
2097			current_counter_lsb;                             \
2098		if (current_counter < last_counter)			 \
2099			counter += 0x1000000000LL;			 \
2100		last_counter = current_counter;				 \
2101		counter &= 0xFFFFFFF000000000LL;			 \
2102		counter |= current_counter;				 \
2103	}
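
/* Counter-extension example (illustrative values): the VF statistics
 * registers are free-running counters that wrap, so the macros above fold
 * each fresh hardware reading into a monotonically increasing 64-bit
 * software counter.  Starting from counter == 0x00000000FFFFFFF0 with a
 * new 32-bit reading of 0x00000010, the reading went "backwards":
 *
 *	counter += 0x100000000LL;		// 0x00000001FFFFFFF0
 *	counter &= 0xFFFFFFFF00000000LL;	// 0x0000000100000000
 *	counter |= 0x00000010;			// 0x0000000100000010
 */
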
2104/**
2105 * ixgbevf_update_stats - Update the board statistics counters.
2106 * @adapter: board private structure
2107 **/
2108void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2109{
2110	struct ixgbe_hw *hw = &adapter->hw;
2111
2112	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2113				adapter->stats.vfgprc);
2114	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2115				adapter->stats.vfgptc);
2116	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2117				adapter->stats.last_vfgorc,
2118				adapter->stats.vfgorc);
2119	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2120				adapter->stats.last_vfgotc,
2121				adapter->stats.vfgotc);
2122	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2123				adapter->stats.vfmprc);
2124}
2125
2126/**
2127 * ixgbevf_watchdog - Timer Call-back
2128 * @data: pointer to adapter cast into an unsigned long
2129 **/
2130static void ixgbevf_watchdog(unsigned long data)
2131{
2132	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2133	struct ixgbe_hw *hw = &adapter->hw;
2134	u32 eics = 0;
2135	int i;
2136
2137	/*
2138	 * Do the watchdog outside of interrupt context due to the lovely
2139	 * delays that some of the newer hardware requires
2140	 */
2141
2142	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2143		goto watchdog_short_circuit;
2144
2145	/* get one bit for every active tx/rx interrupt vector */
2146	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2147		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2148		if (qv->rx.ring || qv->tx.ring)
2149			eics |= 1 << i;
2150	}
2151
2152	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2153
2154watchdog_short_circuit:
2155	schedule_work(&adapter->watchdog_task);
2156}
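
/* EICS example (illustrative): with three active q_vectors that all have
 * rings attached, the loop above builds eics == 0x7; writing that value
 * to VTEICS causes the hardware to fire those MSI-X vectors in software,
 * so each handler runs its normal Tx/Rx cleanup even if an interrupt was
 * lost.
 */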
2157
2158/**
2159 * ixgbevf_tx_timeout - Respond to a Tx Hang
2160 * @netdev: network interface device structure
2161 **/
2162static void ixgbevf_tx_timeout(struct net_device *netdev)
2163{
2164	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2165
2166	/* Do the reset outside of interrupt context */
2167	schedule_work(&adapter->reset_task);
2168}
2169
2170static void ixgbevf_reset_task(struct work_struct *work)
2171{
2172	struct ixgbevf_adapter *adapter;
2173	adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2174
2175	/* If we're already down or resetting, just bail */
2176	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2177	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
2178		return;
2179
2180	adapter->tx_timeout_count++;
2181
2182	ixgbevf_reinit_locked(adapter);
2183}
2184
2185/**
2186 * ixgbevf_watchdog_task - worker thread to bring link up
2187 * @work: pointer to work_struct containing our data
2188 **/
2189static void ixgbevf_watchdog_task(struct work_struct *work)
2190{
2191	struct ixgbevf_adapter *adapter = container_of(work,
2192						       struct ixgbevf_adapter,
2193						       watchdog_task);
2194	struct net_device *netdev = adapter->netdev;
2195	struct ixgbe_hw *hw = &adapter->hw;
2196	u32 link_speed = adapter->link_speed;
2197	bool link_up = adapter->link_up;
2198
2199	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2200
2201	/*
2202	 * Always check the link on the watchdog because we have
2203	 * no LSC interrupt
2204	 */
2205	if (hw->mac.ops.check_link) {
2206		s32 need_reset;
2207
2208		spin_lock_bh(&adapter->mbx_lock);
2209
2210		need_reset = hw->mac.ops.check_link(hw, &link_speed,
2211						    &link_up, false);
2212
2213		spin_unlock_bh(&adapter->mbx_lock);
2214
2215		if (need_reset) {
2216			adapter->link_up = link_up;
2217			adapter->link_speed = link_speed;
2218			netif_carrier_off(netdev);
2219			netif_tx_stop_all_queues(netdev);
2220			schedule_work(&adapter->reset_task);
2221			goto pf_has_reset;
2222		}
2223	} else {
2224		/* always assume link is up if there is no
2225		 * check_link function */
2226		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2227		link_up = true;
2228	}
2229	adapter->link_up = link_up;
2230	adapter->link_speed = link_speed;
2231
2232	if (link_up) {
2233		if (!netif_carrier_ok(netdev)) {
2234			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2235			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2236			       10 : 1);
2237			netif_carrier_on(netdev);
2238			netif_tx_wake_all_queues(netdev);
2239		}
2240	} else {
2241		adapter->link_up = false;
2242		adapter->link_speed = 0;
2243		if (netif_carrier_ok(netdev)) {
2244			hw_dbg(&adapter->hw, "NIC Link is Down\n");
2245			netif_carrier_off(netdev);
2246			netif_tx_stop_all_queues(netdev);
2247		}
2248	}
2249
2250	ixgbevf_update_stats(adapter);
2251
2252pf_has_reset:
2253	/* Reset the timer */
2254	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2255		mod_timer(&adapter->watchdog_timer,
2256			  round_jiffies(jiffies + (2 * HZ)));
2257
2258	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2259}
2260
2261/**
2262 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2263 * @adapter: board private structure
2264 * @tx_ring: Tx descriptor ring for a specific queue
2265 *
2266 * Free all transmit software resources
2267 **/
2268void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2269			       struct ixgbevf_ring *tx_ring)
2270{
2271	struct pci_dev *pdev = adapter->pdev;
2272
2273	ixgbevf_clean_tx_ring(adapter, tx_ring);
2274
2275	vfree(tx_ring->tx_buffer_info);
2276	tx_ring->tx_buffer_info = NULL;
2277
2278	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2279			  tx_ring->dma);
2280
2281	tx_ring->desc = NULL;
2282}
2283
2284/**
2285 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2286 * @adapter: board private structure
2287 *
2288 * Free all transmit software resources
2289 **/
2290static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2291{
2292	int i;
2293
2294	for (i = 0; i < adapter->num_tx_queues; i++)
2295		if (adapter->tx_ring[i].desc)
2296			ixgbevf_free_tx_resources(adapter,
2297						  &adapter->tx_ring[i]);
2298
2299}
2300
2301/**
2302 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2303 * @adapter: board private structure
2304 * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2305 *
2306 * Return 0 on success, negative on failure
2307 **/
2308int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2309			       struct ixgbevf_ring *tx_ring)
2310{
2311	struct pci_dev *pdev = adapter->pdev;
2312	int size;
2313
2314	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2315	tx_ring->tx_buffer_info = vzalloc(size);
2316	if (!tx_ring->tx_buffer_info)
2317		goto err;
2318
2319	/* round up to nearest 4K */
2320	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2321	tx_ring->size = ALIGN(tx_ring->size, 4096);
2322
2323	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2324					   &tx_ring->dma, GFP_KERNEL);
2325	if (!tx_ring->desc)
2326		goto err;
2327
2328	tx_ring->next_to_use = 0;
2329	tx_ring->next_to_clean = 0;
2330	return 0;
2331
2332err:
2333	vfree(tx_ring->tx_buffer_info);
2334	tx_ring->tx_buffer_info = NULL;
2335	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2336	       "descriptor ring\n");
2337	return -ENOMEM;
2338}
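
/* Sizing example (illustrative): a union ixgbe_adv_tx_desc is 16 bytes,
 * so e.g. a 1024-descriptor ring needs 16384 bytes, already a 4 KiB
 * multiple, while a hypothetical count of 100 would give 1600 bytes and
 * ALIGN(1600, 4096) rounds that up to 4096, keeping the descriptor ring
 * in whole pages.
 */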
2339
2340/**
2341 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2342 * @adapter: board private structure
2343 *
2344 * If this function returns with an error, then it's possible one or
2345 * more of the rings is populated (while the rest are not).  It is the
2346 * caller's duty to clean those orphaned rings.
2347 *
2348 * Return 0 on success, negative on failure
2349 **/
2350static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2351{
2352	int i, err = 0;
2353
2354	for (i = 0; i < adapter->num_tx_queues; i++) {
2355		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2356		if (!err)
2357			continue;
2358		hw_dbg(&adapter->hw,
2359		       "Allocation for Tx Queue %u failed\n", i);
2360		break;
2361	}
2362
2363	return err;
2364}
2365
2366/**
2367 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2368 * @adapter: board private structure
2369 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2370 *
2371 * Returns 0 on success, negative on failure
2372 **/
2373int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2374			       struct ixgbevf_ring *rx_ring)
2375{
2376	struct pci_dev *pdev = adapter->pdev;
2377	int size;
2378
2379	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2380	rx_ring->rx_buffer_info = vzalloc(size);
2381	if (!rx_ring->rx_buffer_info)
2382		goto alloc_failed;
2383
2384	/* Round up to nearest 4K */
2385	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2386	rx_ring->size = ALIGN(rx_ring->size, 4096);
2387
2388	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2389					   &rx_ring->dma, GFP_KERNEL);
2390
2391	if (!rx_ring->desc) {
2392		hw_dbg(&adapter->hw,
2393		       "Unable to allocate memory for "
2394		       "the receive descriptor ring\n");
2395		vfree(rx_ring->rx_buffer_info);
2396		rx_ring->rx_buffer_info = NULL;
2397		goto alloc_failed;
2398	}
2399
2400	rx_ring->next_to_clean = 0;
2401	rx_ring->next_to_use = 0;
2402
2403	return 0;
2404alloc_failed:
2405	return -ENOMEM;
2406}
2407
2408/**
2409 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2410 * @adapter: board private structure
2411 *
2412 * If this function returns with an error, then it's possible one or
2413 * more of the rings is populated (while the rest are not).  It is the
2414 * caller's duty to clean those orphaned rings.
2415 *
2416 * Return 0 on success, negative on failure
2417 **/
2418static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2419{
2420	int i, err = 0;
2421
2422	for (i = 0; i < adapter->num_rx_queues; i++) {
2423		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2424		if (!err)
2425			continue;
2426		hw_dbg(&adapter->hw,
2427		       "Allocation for Rx Queue %u failed\n", i);
2428		break;
2429	}
2430	return err;
2431}
2432
2433/**
2434 * ixgbevf_free_rx_resources - Free Rx Resources
2435 * @adapter: board private structure
2436 * @rx_ring: ring to clean the resources from
2437 *
2438 * Free all receive software resources
2439 **/
2440void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2441			       struct ixgbevf_ring *rx_ring)
2442{
2443	struct pci_dev *pdev = adapter->pdev;
2444
2445	ixgbevf_clean_rx_ring(adapter, rx_ring);
2446
2447	vfree(rx_ring->rx_buffer_info);
2448	rx_ring->rx_buffer_info = NULL;
2449
2450	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2451			  rx_ring->dma);
2452
2453	rx_ring->desc = NULL;
2454}
2455
2456/**
2457 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2458 * @adapter: board private structure
2459 *
2460 * Free all receive software resources
2461 **/
2462static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2463{
2464	int i;
2465
2466	for (i = 0; i < adapter->num_rx_queues; i++)
2467		if (adapter->rx_ring[i].desc)
2468			ixgbevf_free_rx_resources(adapter,
2469						  &adapter->rx_ring[i]);
2470}
2471
2472static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2473{
2474	struct ixgbe_hw *hw = &adapter->hw;
2475	struct ixgbevf_ring *rx_ring;
2476	unsigned int def_q = 0;
2477	unsigned int num_tcs = 0;
2478	unsigned int num_rx_queues = 1;
2479	int err, i;
2480
2481	spin_lock_bh(&adapter->mbx_lock);
2482
2483	/* fetch queue configuration from the PF */
2484	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2485
2486	spin_unlock_bh(&adapter->mbx_lock);
2487
2488	if (err)
2489		return err;
2490
2491	if (num_tcs > 1) {
2492		/* update default Tx ring register index */
2493		adapter->tx_ring[0].reg_idx = def_q;
2494
2495		/* we need as many queues as traffic classes */
2496		num_rx_queues = num_tcs;
2497	}
2498
2499	/* nothing to do if we have the correct number of queues */
2500	if (adapter->num_rx_queues == num_rx_queues)
2501		return 0;
2502
2503	/* allocate new rings */
2504	rx_ring = kcalloc(num_rx_queues,
2505			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
2506	if (!rx_ring)
2507		return -ENOMEM;
2508
2509	/* setup ring fields */
2510	for (i = 0; i < num_rx_queues; i++) {
2511		rx_ring[i].count = adapter->rx_ring_count;
2512		rx_ring[i].queue_index = i;
2513		rx_ring[i].reg_idx = i;
2514		rx_ring[i].dev = &adapter->pdev->dev;
2515		rx_ring[i].netdev = adapter->netdev;
2516	}
2517
2518	/* free the existing ring and queues */
2519	adapter->num_rx_queues = 0;
2520	kfree(adapter->rx_ring);
2521
2522	/* move new rings into position on the adapter struct */
2523	adapter->rx_ring = rx_ring;
2524	adapter->num_rx_queues = num_rx_queues;
2525
2526	return 0;
2527}
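
/* Example (illustrative values): if the PF runs DCB with num_tcs == 4 and
 * reports def_q == 8, the code above points the single Tx ring at register
 * index 8 and re-allocates four Rx rings, one per traffic class, before
 * attaching them to the adapter.
 */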
2528
2529/**
2530 * ixgbevf_open - Called when a network interface is made active
2531 * @netdev: network interface device structure
2532 *
2533 * Returns 0 on success, negative value on failure
2534 *
2535 * The open entry point is called when a network interface is made
2536 * active by the system (IFF_UP).  At this point all resources needed
2537 * for transmit and receive operations are allocated, the interrupt
2538 * handler is registered with the OS, the watchdog timer is started,
2539 * and the stack is notified that the interface is ready.
2540 **/
2541static int ixgbevf_open(struct net_device *netdev)
2542{
2543	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2544	struct ixgbe_hw *hw = &adapter->hw;
2545	int err;
2546
2547	/* disallow open during test */
2548	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2549		return -EBUSY;
2550
2551	if (hw->adapter_stopped) {
2552		ixgbevf_reset(adapter);
2553		/* if adapter is still stopped then PF isn't up and
2554		 * the VF can't start. */
2555		if (hw->adapter_stopped) {
2556			err = IXGBE_ERR_MBX;
2557			pr_err("Unable to start - perhaps the PF Driver isn't "
2558			       "up yet\n");
2559			goto err_setup_reset;
2560		}
2561	}
2562
2563	ixgbevf_negotiate_api(adapter);
2564
2565	/* setup queue reg_idx and Rx queue count */
2566	err = ixgbevf_setup_queues(adapter);
2567	if (err)
2568		goto err_setup_queues;
2569
2570	/* allocate transmit descriptors */
2571	err = ixgbevf_setup_all_tx_resources(adapter);
2572	if (err)
2573		goto err_setup_tx;
2574
2575	/* allocate receive descriptors */
2576	err = ixgbevf_setup_all_rx_resources(adapter);
2577	if (err)
2578		goto err_setup_rx;
2579
2580	ixgbevf_configure(adapter);
2581
2582	/*
2583	 * Map the Tx/Rx rings to the vectors we were allotted.
2584	 * Since request_irq will be called in this function,
2585	 * map_rings must be called *before* up_complete.
2586	 */
2587	ixgbevf_map_rings_to_vectors(adapter);
2588
2589	ixgbevf_up_complete(adapter);
2590
2591	/* clear any pending interrupts, may auto mask */
2592	IXGBE_READ_REG(hw, IXGBE_VTEICR);
2593	err = ixgbevf_request_irq(adapter);
2594	if (err)
2595		goto err_req_irq;
2596
2597	ixgbevf_irq_enable(adapter);
2598
2599	return 0;
2600
2601err_req_irq:
2602	ixgbevf_down(adapter);
2603	ixgbevf_free_irq(adapter);
2604err_setup_rx:
2605	ixgbevf_free_all_rx_resources(adapter);
2606err_setup_tx:
2607	ixgbevf_free_all_tx_resources(adapter);
2608err_setup_queues:
2609	ixgbevf_reset(adapter);
2610
2611err_setup_reset:
2612
2613	return err;
2614}
2615
2616/**
2617 * ixgbevf_close - Disables a network interface
2618 * @netdev: network interface device structure
2619 *
2620 * Returns 0, this is not allowed to fail
2621 *
2622 * The close entry point is called when an interface is de-activated
2623 * by the OS.  The hardware is still under the drivers control, but
2624 * needs to be disabled.  A global MAC reset is issued to stop the
2625 * hardware, and all transmit and receive resources are freed.
2626 **/
2627static int ixgbevf_close(struct net_device *netdev)
2628{
2629	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2630
2631	ixgbevf_down(adapter);
2632	ixgbevf_free_irq(adapter);
2633
2634	ixgbevf_free_all_tx_resources(adapter);
2635	ixgbevf_free_all_rx_resources(adapter);
2636
2637	return 0;
2638}
2639
2640static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2641				u32 vlan_macip_lens, u32 type_tucmd,
2642				u32 mss_l4len_idx)
2643{
2644	struct ixgbe_adv_tx_context_desc *context_desc;
2645	u16 i = tx_ring->next_to_use;
2646
2647	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2648
2649	i++;
2650	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2651
2652	/* set bits to identify this as an advanced context descriptor */
2653	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2654
2655	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
2656	context_desc->seqnum_seed	= 0;
2657	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
2658	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
2659}
2660
2661static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2662		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2663{
2664	u32 vlan_macip_lens, type_tucmd;
2665	u32 mss_l4len_idx, l4len;
2666
2667	if (!skb_is_gso(skb))
2668		return 0;
2669
2670	if (skb_header_cloned(skb)) {
2671		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2672		if (err)
2673			return err;
2674	}
2675
2676	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2677	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2678
2679	if (skb->protocol == htons(ETH_P_IP)) {
2680		struct iphdr *iph = ip_hdr(skb);
2681		iph->tot_len = 0;
2682		iph->check = 0;
2683		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2684							 iph->daddr, 0,
2685							 IPPROTO_TCP,
2686							 0);
2687		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2688	} else if (skb_is_gso_v6(skb)) {
2689		ipv6_hdr(skb)->payload_len = 0;
2690		tcp_hdr(skb)->check =
2691		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2692				     &ipv6_hdr(skb)->daddr,
2693				     0, IPPROTO_TCP, 0);
2694	}
2695
2696	/* compute header lengths */
2697	l4len = tcp_hdrlen(skb);
2699	*hdr_len = skb_transport_offset(skb) + l4len;
2700
2701	/* mss_l4len_id: use 1 as index for TSO */
2702	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2703	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2704	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2705
2706	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2707	vlan_macip_lens = skb_network_header_len(skb);
2708	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2709	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2710
2711	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2712			    type_tucmd, mss_l4len_idx);
2713
2714	return 1;
2715}
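
/* Field-packing example (illustrative values): for a TCP/IPv4 TSO frame
 * with a 20-byte TCP header, an MSS of 1448, a 14-byte MAC header and a
 * 20-byte IP header, the words built above work out to:
 *
 *	mss_l4len_idx = (20 << IXGBE_ADVTXD_L4LEN_SHIFT) |
 *			(1448 << IXGBE_ADVTXD_MSS_SHIFT) |
 *			(1 << IXGBE_ADVTXD_IDX_SHIFT);
 *	vlan_macip_lens = 20 |				// IP header length
 *			(14 << IXGBE_ADVTXD_MACLEN_SHIFT);
 */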
2716
2717static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2718			    struct sk_buff *skb, u32 tx_flags)
2719{
2723	u32 vlan_macip_lens = 0;
2724	u32 mss_l4len_idx = 0;
2725	u32 type_tucmd = 0;
2726
2727	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2728		u8 l4_hdr = 0;
2729		switch (skb->protocol) {
2730		case __constant_htons(ETH_P_IP):
2731			vlan_macip_lens |= skb_network_header_len(skb);
2732			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2733			l4_hdr = ip_hdr(skb)->protocol;
2734			break;
2735		case __constant_htons(ETH_P_IPV6):
2736			vlan_macip_lens |= skb_network_header_len(skb);
2737			l4_hdr = ipv6_hdr(skb)->nexthdr;
2738			break;
2739		default:
2740			if (unlikely(net_ratelimit())) {
2741				dev_warn(tx_ring->dev,
2742				 "partial checksum but proto=%x!\n",
2743				 skb->protocol);
2744			}
2745			break;
2746		}
2747
2748		switch (l4_hdr) {
2749		case IPPROTO_TCP:
2750			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2751			mss_l4len_idx = tcp_hdrlen(skb) <<
2752					IXGBE_ADVTXD_L4LEN_SHIFT;
2753			break;
2754		case IPPROTO_SCTP:
2755			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2756			mss_l4len_idx = sizeof(struct sctphdr) <<
2757					IXGBE_ADVTXD_L4LEN_SHIFT;
2758			break;
2759		case IPPROTO_UDP:
2760			mss_l4len_idx = sizeof(struct udphdr) <<
2761					IXGBE_ADVTXD_L4LEN_SHIFT;
2762			break;
2763		default:
2764			if (unlikely(net_ratelimit())) {
2765				dev_warn(tx_ring->dev,
2766				 "partial checksum but l4 proto=%x!\n",
2767				 l4_hdr);
2768			}
2769			break;
2770		}
2771	}
2772
2773	/* vlan_macip_lens: MACLEN, VLAN tag */
2774	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2775	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2776
2777	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2778			    type_tucmd, mss_l4len_idx);
2779
2780	return (skb->ip_summed == CHECKSUM_PARTIAL);
2781}
2782
2783static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2784			  struct sk_buff *skb, u32 tx_flags,
2785			  unsigned int first)
2786{
2787	struct ixgbevf_tx_buffer *tx_buffer_info;
2788	unsigned int len;
2789	unsigned int total = skb->len;
2790	unsigned int offset = 0, size;
2791	int count = 0;
2792	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2793	unsigned int f;
2794	int i;
2795
2796	i = tx_ring->next_to_use;
2797
2798	len = min(skb_headlen(skb), total);
2799	while (len) {
2800		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2801		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2802
2803		tx_buffer_info->length = size;
2804		tx_buffer_info->mapped_as_page = false;
2805		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2806						     skb->data + offset,
2807						     size, DMA_TO_DEVICE);
2808		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2809			goto dma_error;
2810		tx_buffer_info->next_to_watch = i;
2811
2812		len -= size;
2813		total -= size;
2814		offset += size;
2815		count++;
2816		i++;
2817		if (i == tx_ring->count)
2818			i = 0;
2819	}
2820
2821	for (f = 0; f < nr_frags; f++) {
2822		const struct skb_frag_struct *frag;
2823
2824		frag = &skb_shinfo(skb)->frags[f];
2825		len = min((unsigned int)skb_frag_size(frag), total);
2826		offset = 0;
2827
2828		while (len) {
2829			tx_buffer_info = &tx_ring->tx_buffer_info[i];
2830			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2831
2832			tx_buffer_info->length = size;
2833			tx_buffer_info->dma =
2834				skb_frag_dma_map(tx_ring->dev, frag,
2835						 offset, size, DMA_TO_DEVICE);
2836			if (dma_mapping_error(tx_ring->dev,
2837					      tx_buffer_info->dma))
2838				goto dma_error;
2839			tx_buffer_info->mapped_as_page = true;
2840			tx_buffer_info->next_to_watch = i;
2841
2842			len -= size;
2843			total -= size;
2844			offset += size;
2845			count++;
2846			i++;
2847			if (i == tx_ring->count)
2848				i = 0;
2849		}
2850		if (total == 0)
2851			break;
2852	}
2853
2854	if (i == 0)
2855		i = tx_ring->count - 1;
2856	else
2857		i = i - 1;
2858	tx_ring->tx_buffer_info[i].skb = skb;
2859	tx_ring->tx_buffer_info[first].next_to_watch = i;
2860	tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2861
2862	return count;
2863
2864dma_error:
2865	dev_err(tx_ring->dev, "TX DMA map failed\n");
2866
2867	/* clear timestamp and dma mappings for failed tx_buffer_info map */
2868	tx_buffer_info->dma = 0;
2869	tx_buffer_info->next_to_watch = 0;
2870	count--;
2871
2872	/* clear timestamp and dma mappings for remaining portion of packet */
2873	while (count >= 0) {
2874		count--;
2875		i--;
2876		if (i < 0)
2877			i += tx_ring->count;
2878		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2879		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2880	}
2881
2882	return count;
2883}
2884
2885static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2886			     int count, u32 paylen, u8 hdr_len)
2887{
2888	union ixgbe_adv_tx_desc *tx_desc = NULL;
2889	struct ixgbevf_tx_buffer *tx_buffer_info;
2890	u32 olinfo_status = 0, cmd_type_len = 0;
2891	unsigned int i;
2892
2893	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2894
2895	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2896
2897	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2898
2899	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2900		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2901
2902	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2903		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2904
2905	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2906		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2907
2908		/* use index 1 context for tso */
2909		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2910		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2911			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2912
2913	}
2914
2915	/*
2916	 * Check Context must be set if Tx switch is enabled, which it
2917	 * always is when virtual functions are running
2918	 */
2919	olinfo_status |= IXGBE_ADVTXD_CC;
2920
2921	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2922
2923	i = tx_ring->next_to_use;
2924	while (count--) {
2925		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2926		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2927		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2928		tx_desc->read.cmd_type_len =
2929			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2930		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2931		i++;
2932		if (i == tx_ring->count)
2933			i = 0;
2934	}
2935
2936	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2937
2938	tx_ring->next_to_use = i;
2939}
2940
2941static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
2942{
2943	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
2944
2945	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2946	/* Herbert's original patch had:
2947	 *  smp_mb__after_netif_stop_queue();
2948	 * but since that doesn't exist yet, just open code it. */
2949	smp_mb();
2950
2951	/* We need to check again in case another CPU has just
2952	 * made room available. */
2953	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
2954		return -EBUSY;
2955
2956	/* A reprieve! - use start_queue because it doesn't call schedule */
2957	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2958	++adapter->restart_queue;
2959	return 0;
2960}
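
/* Stop/wake pairing sketch (illustrative): the barrier above pairs with
 * one in the Tx cleanup path so a queue is never left stopped while room
 * is available:
 *
 *	xmit path			cleanup path
 *	---------			------------
 *	netif_stop_subqueue()		free used descriptors
 *	smp_mb()			smp_mb()
 *	recheck unused descriptors	queue stopped and room free?
 *	enough? -> restart queue	yes -> wake queue
 */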
2961
2962static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
2963{
2964	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
2965		return 0;
2966	return __ixgbevf_maybe_stop_tx(tx_ring, size);
2967}
2968
2969static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2970{
2971	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2972	struct ixgbevf_ring *tx_ring;
2973	unsigned int first;
2974	unsigned int tx_flags = 0;
2975	u8 hdr_len = 0;
2976	int r_idx = 0, tso;
2977	u16 count = TXD_USE_COUNT(skb_headlen(skb));
2978#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2979	unsigned short f;
2980#endif
2981	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
2982	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
2983		dev_kfree_skb(skb);
2984		return NETDEV_TX_OK;
2985	}
2986
2987	tx_ring = &adapter->tx_ring[r_idx];
2988
2989	/*
2990	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
2991	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
2992	 *       + 2 desc gap to keep tail from touching head,
2993	 *       + 1 desc for context descriptor,
2994	 * otherwise try next time
2995	 */
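	/* Worked example (illustrative): on 4 KiB pages, where a whole page
	 * fits in one data descriptor, an skb with skb_headlen() == 200 and
	 * 15 page fragments needs count = 1 + 15 = 16 data descriptors, so
	 * the check below reserves count + 3 = 19 slots to cover the context
	 * descriptor and the head/tail gap.
	 */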
2996#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2997	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2998		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2999#else
3000	count += skb_shinfo(skb)->nr_frags;
3001#endif
3002	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3003		adapter->tx_busy++;
3004		return NETDEV_TX_BUSY;
3005	}
3006
3007	if (vlan_tx_tag_present(skb)) {
3008		tx_flags |= vlan_tx_tag_get(skb);
3009		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3010		tx_flags |= IXGBE_TX_FLAGS_VLAN;
3011	}
3012
3013	first = tx_ring->next_to_use;
3014
3015	if (skb->protocol == htons(ETH_P_IP))
3016		tx_flags |= IXGBE_TX_FLAGS_IPV4;
3017	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3018	if (tso < 0) {
3019		dev_kfree_skb_any(skb);
3020		return NETDEV_TX_OK;
3021	}
3022
3023	if (tso)
3024		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3025	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3026		tx_flags |= IXGBE_TX_FLAGS_CSUM;
3027
3028	ixgbevf_tx_queue(tx_ring, tx_flags,
3029			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
3030			 skb->len, hdr_len);
3031	/*
3032	 * Force memory writes to complete before letting h/w
3033	 * know there are new descriptors to fetch.  (Only
3034	 * applicable for weak-ordered memory model archs,
3035	 * such as IA-64).
3036	 */
3037	wmb();
3038
3039	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3040
3041	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3042
3043	return NETDEV_TX_OK;
3044}
3045
3046/**
3047 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3048 * @netdev: network interface device structure
3049 * @p: pointer to an address structure
3050 *
3051 * Returns 0 on success, negative on failure
3052 **/
3053static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3054{
3055	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3056	struct ixgbe_hw *hw = &adapter->hw;
3057	struct sockaddr *addr = p;
3058
3059	if (!is_valid_ether_addr(addr->sa_data))
3060		return -EADDRNOTAVAIL;
3061
3062	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3063	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3064
3065	spin_lock_bh(&adapter->mbx_lock);
3066
3067	if (hw->mac.ops.set_rar)
3068		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3069
3070	spin_unlock_bh(&adapter->mbx_lock);
3071
3072	return 0;
3073}
3074
3075/**
3076 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3077 * @netdev: network interface device structure
3078 * @new_mtu: new value for maximum frame size
3079 *
3080 * Returns 0 on success, negative on failure
3081 **/
3082static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3083{
3084	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3085	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3086	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3087
3088	switch (adapter->hw.api_version) {
3089	case ixgbe_mbox_api_11:
3090		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3091		break;
3092	default:
3093		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3094			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3095		break;
3096	}
3097
3098	/* MTU < 68 is an error and causes problems on some kernels */
3099	if ((new_mtu < 68) || (max_frame > max_possible_frame))
3100		return -EINVAL;
3101
3102	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3103	       netdev->mtu, new_mtu);
3104	/* must set new MTU before calling down or up */
3105	netdev->mtu = new_mtu;
3106
3107	if (netif_running(netdev))
3108		ixgbevf_reinit_locked(adapter);
3109
3110	return 0;
3111}
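
/* MTU math example (illustrative): new_mtu == 1500 gives max_frame ==
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) == 1518, which fits the default
 * MAXIMUM_ETHERNET_VLAN_SIZE cap; a 9000-byte MTU is only accepted once
 * mailbox api 1.1 has been negotiated or on an X540 VF, where the larger
 * jumbo-frame limit applies.
 */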
3112
3113static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3114{
3115	struct net_device *netdev = pci_get_drvdata(pdev);
3116	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3117#ifdef CONFIG_PM
3118	int retval = 0;
3119#endif
3120
3121	netif_device_detach(netdev);
3122
3123	if (netif_running(netdev)) {
3124		rtnl_lock();
3125		ixgbevf_down(adapter);
3126		ixgbevf_free_irq(adapter);
3127		ixgbevf_free_all_tx_resources(adapter);
3128		ixgbevf_free_all_rx_resources(adapter);
3129		rtnl_unlock();
3130	}
3131
3132	ixgbevf_clear_interrupt_scheme(adapter);
3133
3134#ifdef CONFIG_PM
3135	retval = pci_save_state(pdev);
3136	if (retval)
3137		return retval;
3138
3139#endif
3140	pci_disable_device(pdev);
3141
3142	return 0;
3143}
3144
3145#ifdef CONFIG_PM
3146static int ixgbevf_resume(struct pci_dev *pdev)
3147{
3148	struct net_device *netdev = pci_get_drvdata(pdev);
3149	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3150	int err;
3151
3152	pci_set_power_state(pdev, PCI_D0);
3153	pci_restore_state(pdev);
3154	/*
3155	 * pci_restore_state clears dev->state_saved so call
3156	 * pci_save_state to restore it.
3157	 */
3158	pci_save_state(pdev);
3159
3160	err = pci_enable_device_mem(pdev);
3161	if (err) {
3162		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3163		return err;
3164	}
3165	pci_set_master(pdev);
3166
3167	rtnl_lock();
3168	err = ixgbevf_init_interrupt_scheme(adapter);
3169	rtnl_unlock();
3170	if (err) {
3171		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3172		return err;
3173	}
3174
3175	ixgbevf_reset(adapter);
3176
3177	if (netif_running(netdev)) {
3178		err = ixgbevf_open(netdev);
3179		if (err)
3180			return err;
3181	}
3182
3183	netif_device_attach(netdev);
3184
3185	return err;
3186}
3187
3188#endif /* CONFIG_PM */
3189static void ixgbevf_shutdown(struct pci_dev *pdev)
3190{
3191	ixgbevf_suspend(pdev, PMSG_SUSPEND);
3192}
3193
3194static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3195						struct rtnl_link_stats64 *stats)
3196{
3197	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3198	unsigned int start;
3199	u64 bytes, packets;
3200	const struct ixgbevf_ring *ring;
3201	int i;
3202
3203	ixgbevf_update_stats(adapter);
3204
3205	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3206
3207	for (i = 0; i < adapter->num_rx_queues; i++) {
3208		ring = &adapter->rx_ring[i];
3209		do {
3210			start = u64_stats_fetch_begin_bh(&ring->syncp);
3211			bytes = ring->total_bytes;
3212			packets = ring->total_packets;
3213		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3214		stats->rx_bytes += bytes;
3215		stats->rx_packets += packets;
3216	}
3217
3218	for (i = 0; i < adapter->num_tx_queues; i++) {
3219		ring = &adapter->tx_ring[i];
3220		do {
3221			start = u64_stats_fetch_begin_bh(&ring->syncp);
3222			bytes = ring->total_bytes;
3223			packets = ring->total_packets;
3224		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3225		stats->tx_bytes += bytes;
3226		stats->tx_packets += packets;
3227	}
3228
3229	return stats;
3230}
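
/* Mechanism note: u64_stats_fetch_begin_bh/_retry_bh above form a
 * seqcount read loop; if a ring's writer updates total_bytes or
 * total_packets between the begin and retry calls, retry returns true and
 * the reads are redone, keeping the 64-bit counters consistent on 32-bit
 * SMP kernels without locking the Tx/Rx fast paths.
 */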
3231
3232static const struct net_device_ops ixgbevf_netdev_ops = {
3233	.ndo_open		= ixgbevf_open,
3234	.ndo_stop		= ixgbevf_close,
3235	.ndo_start_xmit		= ixgbevf_xmit_frame,
3236	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
3237	.ndo_get_stats64	= ixgbevf_get_stats,
3238	.ndo_validate_addr	= eth_validate_addr,
3239	.ndo_set_mac_address	= ixgbevf_set_mac,
3240	.ndo_change_mtu		= ixgbevf_change_mtu,
3241	.ndo_tx_timeout		= ixgbevf_tx_timeout,
3242	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
3243	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
3244};
3245
3246static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3247{
3248	dev->netdev_ops = &ixgbevf_netdev_ops;
3249	ixgbevf_set_ethtool_ops(dev);
3250	dev->watchdog_timeo = 5 * HZ;
3251}
3252
3253/**
3254 * ixgbevf_probe - Device Initialization Routine
3255 * @pdev: PCI device information struct
3256 * @ent: entry in ixgbevf_pci_tbl
3257 *
3258 * Returns 0 on success, negative on failure
3259 *
3260 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3261 * The OS initialization, configuring of the adapter private structure,
3262 * and a hardware reset occur.
3263 **/
3264static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3265				   const struct pci_device_id *ent)
3266{
3267	struct net_device *netdev;
3268	struct ixgbevf_adapter *adapter = NULL;
3269	struct ixgbe_hw *hw = NULL;
3270	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3271	static int cards_found;
3272	int err, pci_using_dac;
3273
3274	err = pci_enable_device(pdev);
3275	if (err)
3276		return err;
3277
3278	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3279	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3280		pci_using_dac = 1;
3281	} else {
3282		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3283		if (err) {
3284			err = dma_set_coherent_mask(&pdev->dev,
3285						    DMA_BIT_MASK(32));
3286			if (err) {
3287				dev_err(&pdev->dev, "No usable DMA "
3288					"configuration, aborting\n");
3289				goto err_dma;
3290			}
3291		}
3292		pci_using_dac = 0;
3293	}
3294
3295	err = pci_request_regions(pdev, ixgbevf_driver_name);
3296	if (err) {
3297		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3298		goto err_pci_reg;
3299	}
3300
3301	pci_set_master(pdev);
3302
3303	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3304				   MAX_TX_QUEUES);
3305	if (!netdev) {
3306		err = -ENOMEM;
3307		goto err_alloc_etherdev;
3308	}
3309
3310	SET_NETDEV_DEV(netdev, &pdev->dev);
3311
3312	pci_set_drvdata(pdev, netdev);
3313	adapter = netdev_priv(netdev);
3314
3315	adapter->netdev = netdev;
3316	adapter->pdev = pdev;
3317	hw = &adapter->hw;
3318	hw->back = adapter;
3319	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3320
3321	/*
3322	 * call save state here in the standalone driver because it relies
3323	 * on the adapter struct to exist and needs to call netdev_priv
3324	 */
3325	pci_save_state(pdev);
3326
3327	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3328			      pci_resource_len(pdev, 0));
3329	if (!hw->hw_addr) {
3330		err = -EIO;
3331		goto err_ioremap;
3332	}
3333
3334	ixgbevf_assign_netdev_ops(netdev);
3335
3336	adapter->bd_number = cards_found;
3337
3338	/* Setup hw api */
3339	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3340	hw->mac.type  = ii->mac;
3341
3342	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3343	       sizeof(struct ixgbe_mbx_operations));
3344
3345	/* setup the private structure */
3346	err = ixgbevf_sw_init(adapter);
3347	if (err)
3348		goto err_sw_init;
3349
3350	/* The HW MAC address was set and/or determined in sw_init */
3351	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3352
3353	if (!is_valid_ether_addr(netdev->dev_addr)) {
3354		pr_err("invalid MAC address\n");
3355		err = -EIO;
3356		goto err_sw_init;
3357	}
3358
3359	netdev->hw_features = NETIF_F_SG |
3360			   NETIF_F_IP_CSUM |
3361			   NETIF_F_IPV6_CSUM |
3362			   NETIF_F_TSO |
3363			   NETIF_F_TSO6 |
3364			   NETIF_F_RXCSUM;
3365
3366	netdev->features = netdev->hw_features |
3367			   NETIF_F_HW_VLAN_TX |
3368			   NETIF_F_HW_VLAN_RX |
3369			   NETIF_F_HW_VLAN_FILTER;
3370
3371	netdev->vlan_features |= NETIF_F_TSO;
3372	netdev->vlan_features |= NETIF_F_TSO6;
3373	netdev->vlan_features |= NETIF_F_IP_CSUM;
3374	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3375	netdev->vlan_features |= NETIF_F_SG;
3376
3377	if (pci_using_dac)
3378		netdev->features |= NETIF_F_HIGHDMA;
3379
3380	netdev->priv_flags |= IFF_UNICAST_FLT;
3381
3382	init_timer(&adapter->watchdog_timer);
3383	adapter->watchdog_timer.function = ixgbevf_watchdog;
3384	adapter->watchdog_timer.data = (unsigned long)adapter;
3385
3386	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3387	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3388
3389	err = ixgbevf_init_interrupt_scheme(adapter);
3390	if (err)
3391		goto err_sw_init;
3392
3393	/* pick up the PCI bus settings for reporting later */
3394	if (hw->mac.ops.get_bus_info)
3395		hw->mac.ops.get_bus_info(hw);
3396
3397	strcpy(netdev->name, "eth%d");
3398
3399	err = register_netdev(netdev);
3400	if (err)
3401		goto err_register;
3402
3403	netif_carrier_off(netdev);
3404
3405	ixgbevf_init_last_counter_stats(adapter);
3406
3407	/* print the MAC address */
3408	hw_dbg(hw, "%pM\n", netdev->dev_addr);
3409
3410	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3411
3412	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3413	cards_found++;
3414	return 0;
3415
3416err_register:
3417	ixgbevf_clear_interrupt_scheme(adapter);
3418err_sw_init:
3419	ixgbevf_reset_interrupt_capability(adapter);
3420	iounmap(hw->hw_addr);
3421err_ioremap:
3422	free_netdev(netdev);
3423err_alloc_etherdev:
3424	pci_release_regions(pdev);
3425err_pci_reg:
3426err_dma:
3427	pci_disable_device(pdev);
3428	return err;
3429}
3430
3431/**
3432 * ixgbevf_remove - Device Removal Routine
3433 * @pdev: PCI device information struct
3434 *
3435 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3436 * that it should release a PCI device.  This could be caused by a
3437 * Hot-Plug event, or because the driver is going to be removed from
3438 * memory.
3439 **/
3440static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3441{
3442	struct net_device *netdev = pci_get_drvdata(pdev);
3443	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3444
3445	set_bit(__IXGBEVF_DOWN, &adapter->state);
3446
3447	del_timer_sync(&adapter->watchdog_timer);
3448
3449	cancel_work_sync(&adapter->reset_task);
3450	cancel_work_sync(&adapter->watchdog_task);
3451
3452	if (netdev->reg_state == NETREG_REGISTERED)
3453		unregister_netdev(netdev);
3454
3455	ixgbevf_clear_interrupt_scheme(adapter);
3456	ixgbevf_reset_interrupt_capability(adapter);
3457
3458	iounmap(adapter->hw.hw_addr);
3459	pci_release_regions(pdev);
3460
3461	hw_dbg(&adapter->hw, "Remove complete\n");
3462
3463	kfree(adapter->tx_ring);
3464	kfree(adapter->rx_ring);
3465
3466	free_netdev(netdev);
3467
3468	pci_disable_device(pdev);
3469}
3470
3471/**
3472 * ixgbevf_io_error_detected - called when PCI error is detected
3473 * @pdev: Pointer to PCI device
3474 * @state: The current pci connection state
3475 *
3476 * This function is called after a PCI bus error affecting
3477 * this device has been detected.
3478 */
3479static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3480						  pci_channel_state_t state)
3481{
3482	struct net_device *netdev = pci_get_drvdata(pdev);
3483	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3484
3485	netif_device_detach(netdev);
3486
3487	if (state == pci_channel_io_perm_failure)
3488		return PCI_ERS_RESULT_DISCONNECT;
3489
3490	if (netif_running(netdev))
3491		ixgbevf_down(adapter);
3492
3493	pci_disable_device(pdev);
3494
3495	/* Request a slot reset. */
3496	return PCI_ERS_RESULT_NEED_RESET;
3497}
3498
3499/**
3500 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3501 * @pdev: Pointer to PCI device
3502 *
3503 * Restart the card from scratch, as if from a cold-boot. Implementation
3504 * resembles the first-half of the ixgbevf_resume routine.
3505 */
3506static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3507{
3508	struct net_device *netdev = pci_get_drvdata(pdev);
3509	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3510
3511	if (pci_enable_device_mem(pdev)) {
3512		dev_err(&pdev->dev,
3513			"Cannot re-enable PCI device after reset.\n");
3514		return PCI_ERS_RESULT_DISCONNECT;
3515	}
3516
3517	pci_set_master(pdev);
3518
3519	ixgbevf_reset(adapter);
3520
3521	return PCI_ERS_RESULT_RECOVERED;
3522}
3523
3524/**
3525 * ixgbevf_io_resume - called when traffic can start flowing again.
3526 * @pdev: Pointer to PCI device
3527 *
3528 * This callback is called when the error recovery driver tells us that
3529 * it's OK to resume normal operation. Implementation resembles the
3530 * second-half of the ixgbevf_resume routine.
3531 */
3532static void ixgbevf_io_resume(struct pci_dev *pdev)
3533{
3534	struct net_device *netdev = pci_get_drvdata(pdev);
3535	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3536
3537	if (netif_running(netdev))
3538		ixgbevf_up(adapter);
3539
3540	netif_device_attach(netdev);
3541}
3542
3543/* PCI Error Recovery (ERS) */
3544static const struct pci_error_handlers ixgbevf_err_handler = {
3545	.error_detected = ixgbevf_io_error_detected,
3546	.slot_reset = ixgbevf_io_slot_reset,
3547	.resume = ixgbevf_io_resume,
3548};
3549
3550static struct pci_driver ixgbevf_driver = {
3551	.name     = ixgbevf_driver_name,
3552	.id_table = ixgbevf_pci_tbl,
3553	.probe    = ixgbevf_probe,
3554	.remove   = __devexit_p(ixgbevf_remove),
3555#ifdef CONFIG_PM
3556	/* Power Management Hooks */
3557	.suspend  = ixgbevf_suspend,
3558	.resume   = ixgbevf_resume,
3559#endif
3560	.shutdown = ixgbevf_shutdown,
3561	.err_handler = &ixgbevf_err_handler
3562};
3563
3564/**
3565 * ixgbevf_init_module - Driver Registration Routine
3566 *
3567 * ixgbevf_init_module is the first routine called when the driver is
3568 * loaded. All it does is register with the PCI subsystem.
3569 **/
3570static int __init ixgbevf_init_module(void)
3571{
3572	int ret;
3573	pr_info("%s - version %s\n", ixgbevf_driver_string,
3574		ixgbevf_driver_version);
3575
3576	pr_info("%s\n", ixgbevf_copyright);
3577
3578	ret = pci_register_driver(&ixgbevf_driver);
3579	return ret;
3580}
3581
3582module_init(ixgbevf_init_module);
3583
3584/**
3585 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3586 *
3587 * ixgbevf_exit_module is called just before the driver is removed
3588 * from memory.
3589 **/
3590static void __exit ixgbevf_exit_module(void)
3591{
3592	pci_unregister_driver(&ixgbevf_driver);
3593}
3594
3595#ifdef DEBUG
3596/**
3597 * ixgbevf_get_hw_dev_name - return device name string
3598 * used by hardware layer to print debugging information
3599 **/
3600char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3601{
3602	struct ixgbevf_adapter *adapter = hw->back;
3603	return adapter->netdev->name;
3604}
3605
3606#endif
3607module_exit(ixgbevf_exit_module);
3608
3609/* ixgbevf_main.c */
3610