ixgbevf_main.c revision 858c3dda5ea3519a3799a147904ae1d6e6c4e7c1
1/*******************************************************************************
2
3  Intel 82599 Virtual Function driver
4  Copyright(c) 1999 - 2012 Intel Corporation.
5
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  more details.
14
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35#include <linux/types.h>
36#include <linux/bitops.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/vmalloc.h>
41#include <linux/string.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/tcp.h>
45#include <linux/sctp.h>
46#include <linux/ipv6.h>
47#include <linux/slab.h>
48#include <net/checksum.h>
49#include <net/ip6_checksum.h>
50#include <linux/ethtool.h>
51#include <linux/if.h>
52#include <linux/if_vlan.h>
53#include <linux/prefetch.h>
54
55#include "ixgbevf.h"
56
57const char ixgbevf_driver_name[] = "ixgbevf";
58static const char ixgbevf_driver_string[] =
59	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60
61#define DRV_VERSION "2.7.12-k"
62const char ixgbevf_driver_version[] = DRV_VERSION;
63static char ixgbevf_copyright[] =
64	"Copyright (c) 2009 - 2012 Intel Corporation.";
65
66static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67	[board_82599_vf] = &ixgbevf_82599_vf_info,
68	[board_X540_vf]  = &ixgbevf_X540_vf_info,
69};
70
71/* ixgbevf_pci_tbl - PCI Device ID Table
72 *
73 * Wildcard entries (PCI_ANY_ID) should come last
74 * Last entry must be all 0s
75 *
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 *   Class, Class Mask, private data (not used) }
78 */
79static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82	/* required last entry */
83	{0, }
84};
85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION);
91
92#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
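/* a negative debug value (the default) falls back to the DEFAULT_MSG_ENABLE mask */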
93static int debug = -1;
94module_param(debug, int, 0);
95MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
96
97/* forward decls */
98static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
99static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
100
101static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
102					   struct ixgbevf_ring *rx_ring,
103					   u32 val)
104{
105	/*
106	 * Force memory writes to complete before letting h/w
107	 * know there are new descriptors to fetch.  (Only
108	 * applicable for weak-ordered memory model archs,
109	 * such as IA-64).
110	 */
111	wmb();
112	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
113}
114
115/**
116 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
117 * @adapter: pointer to adapter struct
118 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
119 * @queue: queue to map the corresponding interrupt to
120 * @msix_vector: the vector to map to the corresponding queue
121 */
122static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
123			     u8 queue, u8 msix_vector)
124{
125	u32 ivar, index;
126	struct ixgbe_hw *hw = &adapter->hw;
127	if (direction == -1) {
128		/* other causes */
129		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
130		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
131		ivar &= ~0xFF;
132		ivar |= msix_vector;
133		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
134	} else {
135		/* tx or rx causes */
136		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
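		/* each VTIVAR register covers two queues: the even queue's
		 * Rx/Tx entries live in bits 7:0 and 15:8, the odd queue's
		 * in bits 23:16 and 31:24
		 */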
137		index = ((16 * (queue & 1)) + (8 * direction));
138		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
139		ivar &= ~(0xFF << index);
140		ivar |= (msix_vector << index);
141		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
142	}
143}
144
145static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
146					       struct ixgbevf_tx_buffer
147					       *tx_buffer_info)
148{
149	if (tx_buffer_info->dma) {
150		if (tx_buffer_info->mapped_as_page)
151			dma_unmap_page(tx_ring->dev,
152				       tx_buffer_info->dma,
153				       tx_buffer_info->length,
154				       DMA_TO_DEVICE);
155		else
156			dma_unmap_single(tx_ring->dev,
157					 tx_buffer_info->dma,
158					 tx_buffer_info->length,
159					 DMA_TO_DEVICE);
160		tx_buffer_info->dma = 0;
161	}
162	if (tx_buffer_info->skb) {
163		dev_kfree_skb_any(tx_buffer_info->skb);
164		tx_buffer_info->skb = NULL;
165	}
166	tx_buffer_info->time_stamp = 0;
167	/* tx_buffer_info must be completely set up in the transmit path */
168}
169
170#define IXGBE_MAX_TXD_PWR	14
171#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
172
173/* Tx Descriptors needed, worst case */
174#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
175#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
176
177static void ixgbevf_tx_timeout(struct net_device *netdev);
178
179/**
180 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
181 * @q_vector: structure containing interrupt and ring information
182 * @tx_ring: tx ring to clean
183 **/
184static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
185				 struct ixgbevf_ring *tx_ring)
186{
187	struct ixgbevf_adapter *adapter = q_vector->adapter;
188	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
189	struct ixgbevf_tx_buffer *tx_buffer_info;
190	unsigned int i, count = 0;
191	unsigned int total_bytes = 0, total_packets = 0;
192
193	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
194		return true;
195
196	i = tx_ring->next_to_clean;
197	tx_buffer_info = &tx_ring->tx_buffer_info[i];
198	eop_desc = tx_buffer_info->next_to_watch;
199
200	do {
201		bool cleaned = false;
202
203		/* if next_to_watch is not set then there is no work pending */
204		if (!eop_desc)
205			break;
206
207		/* prevent any other reads prior to eop_desc */
208		read_barrier_depends();
209
210		/* if DD is not set pending work has not been completed */
211		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
212			break;
213
214		/* clear next_to_watch to prevent false hangs */
215		tx_buffer_info->next_to_watch = NULL;
216
217		for ( ; !cleaned; count++) {
218			struct sk_buff *skb;
219			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
220			cleaned = (tx_desc == eop_desc);
221			skb = tx_buffer_info->skb;
222
223			if (cleaned && skb) {
224				unsigned int segs, bytecount;
225
226				/* gso_segs is currently only valid for tcp */
227				segs = skb_shinfo(skb)->gso_segs ?: 1;
228				/* account for the headers replicated on each extra TSO segment */
229				bytecount = ((segs - 1) * skb_headlen(skb)) +
230					    skb->len;
231				total_packets += segs;
232				total_bytes += bytecount;
233			}
234
235			ixgbevf_unmap_and_free_tx_resource(tx_ring,
236							   tx_buffer_info);
237
238			tx_desc->wb.status = 0;
239
240			i++;
241			if (i == tx_ring->count)
242				i = 0;
243
244			tx_buffer_info = &tx_ring->tx_buffer_info[i];
245		}
246
247		eop_desc = tx_buffer_info->next_to_watch;
248	} while (count < tx_ring->count);
249
250	tx_ring->next_to_clean = i;
251
252#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
253	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
254		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
255		/* Make sure that anybody stopping the queue after this
256		 * sees the new next_to_clean.
257		 */
258		smp_mb();
259		if (__netif_subqueue_stopped(tx_ring->netdev,
260					     tx_ring->queue_index) &&
261		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
262			netif_wake_subqueue(tx_ring->netdev,
263					    tx_ring->queue_index);
264			++adapter->restart_queue;
265		}
266	}
267
268	u64_stats_update_begin(&tx_ring->syncp);
269	tx_ring->total_bytes += total_bytes;
270	tx_ring->total_packets += total_packets;
271	u64_stats_update_end(&tx_ring->syncp);
272	q_vector->tx.total_bytes += total_bytes;
273	q_vector->tx.total_packets += total_packets;
274
275	return count < tx_ring->count;
276}
277
278/**
279 * ixgbevf_receive_skb - Send a completed packet up the stack
280 * @q_vector: structure containing interrupt and ring information
281 * @skb: packet to send up
282 * @status: hardware indication of status of receive
283 * @rx_desc: rx descriptor
284 **/
285static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
286				struct sk_buff *skb, u8 status,
287				union ixgbe_adv_rx_desc *rx_desc)
288{
289	struct ixgbevf_adapter *adapter = q_vector->adapter;
290	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
291	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
292
293	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
294		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
295
296	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
297		napi_gro_receive(&q_vector->napi, skb);
298	else
299		netif_rx(skb);
300}
301
302/**
303 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
304 * @ring: pointer to Rx descriptor ring structure
305 * @status_err: hardware indication of status of receive
306 * @skb: skb currently being received and modified
307 **/
308static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
309				       u32 status_err, struct sk_buff *skb)
310{
311	skb_checksum_none_assert(skb);
312
313	/* Rx csum disabled */
314	if (!(ring->netdev->features & NETIF_F_RXCSUM))
315		return;
316
317	/* if IP and error */
318	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
319	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
320		ring->hw_csum_rx_error++;
321		return;
322	}
323
324	if (!(status_err & IXGBE_RXD_STAT_L4CS))
325		return;
326
327	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
328		ring->hw_csum_rx_error++;
329		return;
330	}
331
332	/* It must be a TCP or UDP packet with a valid checksum */
333	skb->ip_summed = CHECKSUM_UNNECESSARY;
334	ring->hw_csum_rx_good++;
335}
336
337/**
338 * ixgbevf_alloc_rx_buffers - Replace used receive buffers
339 * @adapter: board private structure
 * @rx_ring: rx descriptor ring to refill
 * @cleaned_count: number of buffers to replace
340 **/
341static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
342				     struct ixgbevf_ring *rx_ring,
343				     int cleaned_count)
344{
345	struct pci_dev *pdev = adapter->pdev;
346	union ixgbe_adv_rx_desc *rx_desc;
347	struct ixgbevf_rx_buffer *bi;
348	unsigned int i = rx_ring->next_to_use;
349
350	bi = &rx_ring->rx_buffer_info[i];
351
352	while (cleaned_count--) {
353		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
354
355		if (!bi->skb) {
356			struct sk_buff *skb;
357
358			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
359							rx_ring->rx_buf_len);
360			if (!skb) {
361				adapter->alloc_rx_buff_failed++;
362				goto no_buffers;
363			}
364			bi->skb = skb;
365
366			bi->dma = dma_map_single(&pdev->dev, skb->data,
367						 rx_ring->rx_buf_len,
368						 DMA_FROM_DEVICE);
369			if (dma_mapping_error(&pdev->dev, bi->dma)) {
370				dev_kfree_skb(skb);
371				bi->skb = NULL;
372				dev_err(&pdev->dev, "RX DMA map failed\n");
373				break;
374			}
375		}
376		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
377
378		i++;
379		if (i == rx_ring->count)
380			i = 0;
381		bi = &rx_ring->rx_buffer_info[i];
382	}
383
384no_buffers:
385	if (rx_ring->next_to_use != i) {
386		rx_ring->next_to_use = i;
387		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
388	}
389}
390
391static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
392					     u32 qmask)
393{
394	struct ixgbe_hw *hw = &adapter->hw;
395
396	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
397}
398
399static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
400				 struct ixgbevf_ring *rx_ring,
401				 int budget)
402{
403	struct ixgbevf_adapter *adapter = q_vector->adapter;
404	struct pci_dev *pdev = adapter->pdev;
405	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
406	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
407	struct sk_buff *skb;
408	unsigned int i;
409	u32 len, staterr;
410	int cleaned_count = 0;
411	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
412
413	i = rx_ring->next_to_clean;
414	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
415	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
416	rx_buffer_info = &rx_ring->rx_buffer_info[i];
417
418	while (staterr & IXGBE_RXD_STAT_DD) {
419		if (!budget)
420			break;
421		budget--;
422
423		rmb(); /* read descriptor and rx_buffer_info after status DD */
424		len = le16_to_cpu(rx_desc->wb.upper.length);
425		skb = rx_buffer_info->skb;
426		prefetch(skb->data - NET_IP_ALIGN);
427		rx_buffer_info->skb = NULL;
428
429		if (rx_buffer_info->dma) {
430			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
431					 rx_ring->rx_buf_len,
432					 DMA_FROM_DEVICE);
433			rx_buffer_info->dma = 0;
434			skb_put(skb, len);
435		}
436
437		i++;
438		if (i == rx_ring->count)
439			i = 0;
440
441		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
442		prefetch(next_rxd);
443		cleaned_count++;
444
445		next_buffer = &rx_ring->rx_buffer_info[i];
446
447		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
448			skb->next = next_buffer->skb;
449			IXGBE_CB(skb->next)->prev = skb;
450			adapter->non_eop_descs++;
451			goto next_desc;
452		}
453
454		/* we should not be chaining buffers; if we did, drop the skb */
455		if (IXGBE_CB(skb)->prev) {
456			do {
457				struct sk_buff *this = skb;
458				skb = IXGBE_CB(skb)->prev;
459				dev_kfree_skb(this);
460			} while (skb);
461			goto next_desc;
462		}
463
464		/* ERR_MASK will only have valid bits if EOP set */
465		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
466			dev_kfree_skb_irq(skb);
467			goto next_desc;
468		}
469
470		ixgbevf_rx_checksum(rx_ring, staterr, skb);
471
472		/* probably a little skewed due to removing CRC */
473		total_rx_bytes += skb->len;
474		total_rx_packets++;
475
476		/*
477		 * Work around issue of some types of VM to VM loop back
478		 * packets not getting split correctly
479		 */
480		if (staterr & IXGBE_RXD_STAT_LB) {
481			u32 header_fixup_len = skb_headlen(skb);
482			if (header_fixup_len < 14)
483				skb_push(skb, header_fixup_len);
484		}
485		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
486
487		/* Workaround hardware that can't do proper VEPA multicast
488		 * source pruning.
489		 */
490		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
491		    ether_addr_equal(adapter->netdev->dev_addr,
492				     eth_hdr(skb)->h_source)) {
493			dev_kfree_skb_irq(skb);
494			goto next_desc;
495		}
496
497		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
498
499next_desc:
500		rx_desc->wb.upper.status_error = 0;
501
502		/* return some buffers to hardware, one at a time is too slow */
503		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
504			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
505						 cleaned_count);
506			cleaned_count = 0;
507		}
508
509		/* use prefetched values */
510		rx_desc = next_rxd;
511		rx_buffer_info = &rx_ring->rx_buffer_info[i];
512
513		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
514	}
515
516	rx_ring->next_to_clean = i;
517	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
518
519	if (cleaned_count)
520		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
521
522	u64_stats_update_begin(&rx_ring->syncp);
523	rx_ring->total_packets += total_rx_packets;
524	rx_ring->total_bytes += total_rx_bytes;
525	u64_stats_update_end(&rx_ring->syncp);
526	q_vector->rx.total_packets += total_rx_packets;
527	q_vector->rx.total_bytes += total_rx_bytes;
528
529	return !!budget;
530}
531
532/**
533 * ixgbevf_poll - NAPI polling callback
534 * @napi: napi struct with our device's info in it
535 * @budget: amount of work driver is allowed to do this pass, in packets
536 *
537 * This function will clean one or more rings associated with the
538 * q_vector.
539 **/
540static int ixgbevf_poll(struct napi_struct *napi, int budget)
541{
542	struct ixgbevf_q_vector *q_vector =
543		container_of(napi, struct ixgbevf_q_vector, napi);
544	struct ixgbevf_adapter *adapter = q_vector->adapter;
545	struct ixgbevf_ring *ring;
546	int per_ring_budget;
547	bool clean_complete = true;
548
549	ixgbevf_for_each_ring(ring, q_vector->tx)
550		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
551
552	/* attempt to distribute budget to each queue fairly, but don't allow
553	 * the budget to go below 1 because we'll exit polling */
554	if (q_vector->rx.count > 1)
555		per_ring_budget = max(budget/q_vector->rx.count, 1);
556	else
557		per_ring_budget = budget;
558
559	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
560	ixgbevf_for_each_ring(ring, q_vector->rx)
561		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
562						       per_ring_budget);
563	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
564
565	/* If all work not completed, return budget and keep polling */
566	if (!clean_complete)
567		return budget;
568	/* all work done, exit the polling mode */
569	napi_complete(napi);
570	if (adapter->rx_itr_setting & 1)
571		ixgbevf_set_itr(q_vector);
572	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
573		ixgbevf_irq_enable_queues(adapter,
574					  1 << q_vector->v_idx);
575
576	return 0;
577}
578
579/**
580 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
581 * @q_vector: structure containing interrupt and ring information
582 */
583static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
584{
585	struct ixgbevf_adapter *adapter = q_vector->adapter;
586	struct ixgbe_hw *hw = &adapter->hw;
587	int v_idx = q_vector->v_idx;
588	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
589
590	/*
591	 * set the WDIS bit so that this write does not clear the timer
592	 * bits and cause an immediate assertion of the interrupt
593	 */
594	itr_reg |= IXGBE_EITR_CNT_WDIS;
595
596	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
597}
598
599/**
600 * ixgbevf_configure_msix - Configure MSI-X hardware
601 * @adapter: board private structure
602 *
603 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
604 * interrupts.
605 **/
606static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
607{
608	struct ixgbevf_q_vector *q_vector;
609	int q_vectors, v_idx;
610
611	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
612	adapter->eims_enable_mask = 0;
613
614	/*
615	 * Populate the IVAR table and set the ITR values to the
616	 * corresponding register.
617	 */
618	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
619		struct ixgbevf_ring *ring;
620		q_vector = adapter->q_vector[v_idx];
621
622		ixgbevf_for_each_ring(ring, q_vector->rx)
623			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
624
625		ixgbevf_for_each_ring(ring, q_vector->tx)
626			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
627
628		if (q_vector->tx.ring && !q_vector->rx.ring) {
629			/* tx only vector */
630			if (adapter->tx_itr_setting == 1)
631				q_vector->itr = IXGBE_10K_ITR;
632			else
633				q_vector->itr = adapter->tx_itr_setting;
634		} else {
635			/* rx or rx/tx vector */
636			if (adapter->rx_itr_setting == 1)
637				q_vector->itr = IXGBE_20K_ITR;
638			else
639				q_vector->itr = adapter->rx_itr_setting;
640		}
641
642		/* add q_vector eims value to global eims_enable_mask */
643		adapter->eims_enable_mask |= 1 << v_idx;
644
645		ixgbevf_write_eitr(q_vector);
646	}
647
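	/* the loop above leaves v_idx equal to q_vectors, the index of the
	 * last MSI-X vector; map the "other causes" (mailbox) interrupt to it
	 */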
648	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
649	/* setup eims_other and add value to global eims_enable_mask */
650	adapter->eims_other = 1 << v_idx;
651	adapter->eims_enable_mask |= adapter->eims_other;
652}
653
654enum latency_range {
655	lowest_latency = 0,
656	low_latency = 1,
657	bulk_latency = 2,
658	latency_invalid = 255
659};
660
661/**
662 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
663 * @q_vector: structure containing interrupt and ring information
664 * @ring_container: structure containing ring performance data
665 *
666 *      Stores a new ITR value based on packets and byte
667 *      counts during the last interrupt.  The advantage of per interrupt
668 *      computation is faster updates and more accurate ITR for the current
669 *      traffic pattern.  Constants in this function were computed
670 *      based on theoretical maximum wire speed and thresholds were set based
671 *      on testing data as well as attempting to minimize response time
672 *      while increasing bulk throughput.
673 **/
674static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
675			       struct ixgbevf_ring_container *ring_container)
676{
677	int bytes = ring_container->total_bytes;
678	int packets = ring_container->total_packets;
679	u32 timepassed_us;
680	u64 bytes_perint;
681	u8 itr_setting = ring_container->itr;
682
683	if (packets == 0)
684		return;
685
686	/* simple throttlerate management
687	 *    0-20MB/s lowest (100000 ints/s)
688	 *   20-100MB/s low   (20000 ints/s)
689	 *  100-1249MB/s bulk (8000 ints/s)
690	 */
691	/* what was last interrupt timeslice? */
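	/* itr holds the EITR register encoding: the interval sits at bit 3
	 * in 2 usec units, so shifting right by 2 yields microseconds
	 */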
692	timepassed_us = q_vector->itr >> 2;
693	bytes_perint = bytes / timepassed_us; /* bytes/usec */
694
695	switch (itr_setting) {
696	case lowest_latency:
697		if (bytes_perint > 10)
698			itr_setting = low_latency;
699		break;
700	case low_latency:
701		if (bytes_perint > 20)
702			itr_setting = bulk_latency;
703		else if (bytes_perint <= 10)
704			itr_setting = lowest_latency;
705		break;
706	case bulk_latency:
707		if (bytes_perint <= 20)
708			itr_setting = low_latency;
709		break;
710	}
711
712	/* clear work counters since we have the values we need */
713	ring_container->total_bytes = 0;
714	ring_container->total_packets = 0;
715
716	/* write updated itr to ring container */
717	ring_container->itr = itr_setting;
718}
719
720static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
721{
722	u32 new_itr = q_vector->itr;
723	u8 current_itr;
724
725	ixgbevf_update_itr(q_vector, &q_vector->tx);
726	ixgbevf_update_itr(q_vector, &q_vector->rx);
727
728	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
729
730	switch (current_itr) {
731	/* counts and packets in update_itr are dependent on these numbers */
732	case lowest_latency:
733		new_itr = IXGBE_100K_ITR;
734		break;
735	case low_latency:
736		new_itr = IXGBE_20K_ITR;
737		break;
738	case bulk_latency:
739	default:
740		new_itr = IXGBE_8K_ITR;
741		break;
742	}
743
744	if (new_itr != q_vector->itr) {
745		/* do an exponential smoothing */
746		new_itr = (10 * new_itr * q_vector->itr) /
747			  ((9 * new_itr) + q_vector->itr);
748
749		/* save the algorithm value here */
750		q_vector->itr = new_itr;
751
752		ixgbevf_write_eitr(q_vector);
753	}
754}
755
756static irqreturn_t ixgbevf_msix_other(int irq, void *data)
757{
758	struct ixgbevf_adapter *adapter = data;
759	struct ixgbe_hw *hw = &adapter->hw;
760
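	/* this vector covers the "other causes" (PF mailbox) interrupt;
	 * flag a link status check and let the watchdog do the work
	 */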
761	hw->mac.get_link_status = 1;
762
763	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
764		mod_timer(&adapter->watchdog_timer, jiffies);
765
766	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
767
768	return IRQ_HANDLED;
769}
770
771/**
772 * ixgbevf_msix_clean_rings - queue interrupt handler for a single MSI-X vector
773 * @irq: unused
774 * @data: pointer to our q_vector struct for this interrupt vector
775 **/
776static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
777{
778	struct ixgbevf_q_vector *q_vector = data;
779
780	/* EIAM disabled interrupts (on this vector) for us */
781	if (q_vector->rx.ring || q_vector->tx.ring)
782		napi_schedule(&q_vector->napi);
783
784	return IRQ_HANDLED;
785}
786
787static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
788				     int r_idx)
789{
790	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
791
792	a->rx_ring[r_idx].next = q_vector->rx.ring;
793	q_vector->rx.ring = &a->rx_ring[r_idx];
794	q_vector->rx.count++;
795}
796
797static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
798				     int t_idx)
799{
800	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
801
802	a->tx_ring[t_idx].next = q_vector->tx.ring;
803	q_vector->tx.ring = &a->tx_ring[t_idx];
804	q_vector->tx.count++;
805}
806
807/**
808 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
809 * @adapter: board private structure to initialize
810 *
811 * This function maps descriptor rings to the queue-specific vectors
812 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
813 * one vector per ring/queue, but on a constrained vector budget, we
814 * group the rings as "efficiently" as possible.  You would add new
815 * mapping configurations in here.
816 **/
817static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
818{
819	int q_vectors;
820	int v_start = 0;
821	int rxr_idx = 0, txr_idx = 0;
822	int rxr_remaining = adapter->num_rx_queues;
823	int txr_remaining = adapter->num_tx_queues;
824	int i, j;
825	int rqpv, tqpv;
826	int err = 0;
827
828	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
829
830	/*
831	 * The ideal configuration...
832	 * We have enough vectors to map one per queue.
833	 */
834	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
835		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
836			map_vector_to_rxq(adapter, v_start, rxr_idx);
837
838		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
839			map_vector_to_txq(adapter, v_start, txr_idx);
840		goto out;
841	}
842
843	/*
844	 * If we don't have enough vectors for a 1-to-1
845	 * mapping, we'll have to group them so there are
846	 * multiple queues per vector.
847	 */
848	/* Re-adjusting *qpv takes care of the remainder. */
849	for (i = v_start; i < q_vectors; i++) {
850		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
851		for (j = 0; j < rqpv; j++) {
852			map_vector_to_rxq(adapter, i, rxr_idx);
853			rxr_idx++;
854			rxr_remaining--;
855		}
856	}
857	for (i = v_start; i < q_vectors; i++) {
858		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
859		for (j = 0; j < tqpv; j++) {
860			map_vector_to_txq(adapter, i, txr_idx);
861			txr_idx++;
862			txr_remaining--;
863		}
864	}
865
866out:
867	return err;
868}
869
870/**
871 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
872 * @adapter: board private structure
873 *
874 * ixgbevf_request_msix_irqs requests MSI-X interrupts from the kernel
875 * for the vectors that were allocated earlier.
876 **/
877static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
878{
879	struct net_device *netdev = adapter->netdev;
880	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
881	int vector, err;
882	int ri = 0, ti = 0;
883
884	for (vector = 0; vector < q_vectors; vector++) {
885		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
886		struct msix_entry *entry = &adapter->msix_entries[vector];
887
888		if (q_vector->tx.ring && q_vector->rx.ring) {
889			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
890				 "%s-%s-%d", netdev->name, "TxRx", ri++);
891			ti++;
892		} else if (q_vector->rx.ring) {
893			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
894				 "%s-%s-%d", netdev->name, "rx", ri++);
895		} else if (q_vector->tx.ring) {
896			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
897				 "%s-%s-%d", netdev->name, "tx", ti++);
898		} else {
899			/* skip this unused q_vector */
900			continue;
901		}
902		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
903				  q_vector->name, q_vector);
904		if (err) {
905			hw_dbg(&adapter->hw,
906			       "request_irq failed for MSIX interrupt "
907			       "Error: %d\n", err);
908			goto free_queue_irqs;
909		}
910	}
911
912	err = request_irq(adapter->msix_entries[vector].vector,
913			  &ixgbevf_msix_other, 0, netdev->name, adapter);
914	if (err) {
915		hw_dbg(&adapter->hw,
916		       "request_irq for msix_other failed: %d\n", err);
917		goto free_queue_irqs;
918	}
919
920	return 0;
921
922free_queue_irqs:
923	while (vector) {
924		vector--;
925		free_irq(adapter->msix_entries[vector].vector,
926			 adapter->q_vector[vector]);
927	}
928	/* This failure is non-recoverable - it indicates the system is
929	 * out of MSIX vector resources and the VF driver cannot run
930	 * without them.  Set the number of msix vectors to zero
931	 * indicating that not enough can be allocated.  The error
932	 * will be returned to the user indicating device open failed.
933	 * Any further attempts to force the driver to open will also
934	 * fail.  The only way to recover is to unload the driver and
935	 * reload it again.  If the system has recovered some MSIX
936	 * vectors then it may succeed.
937	 */
938	adapter->num_msix_vectors = 0;
939	return err;
940}
941
942static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
943{
944	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
945
946	for (i = 0; i < q_vectors; i++) {
947		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
948		q_vector->rx.ring = NULL;
949		q_vector->tx.ring = NULL;
950		q_vector->rx.count = 0;
951		q_vector->tx.count = 0;
952	}
953}
954
955/**
956 * ixgbevf_request_irq - initialize interrupts
957 * @adapter: board private structure
958 *
959 * Attempts to configure interrupts using the best available
960 * capabilities of the hardware and kernel.
961 **/
962static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
963{
964	int err = 0;
965
966	err = ixgbevf_request_msix_irqs(adapter);
967
968	if (err)
969		hw_dbg(&adapter->hw,
970		       "request_irq failed, Error %d\n", err);
971
972	return err;
973}
974
975static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
976{
977	int i, q_vectors;
978
979	q_vectors = adapter->num_msix_vectors;
980	i = q_vectors - 1;
981
982	free_irq(adapter->msix_entries[i].vector, adapter);
983	i--;
984
985	for (; i >= 0; i--) {
986		/* free only the irqs that were actually requested */
987		if (!adapter->q_vector[i]->rx.ring &&
988		    !adapter->q_vector[i]->tx.ring)
989			continue;
990
991		free_irq(adapter->msix_entries[i].vector,
992			 adapter->q_vector[i]);
993	}
994
995	ixgbevf_reset_q_vectors(adapter);
996}
997
998/**
999 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1000 * @adapter: board private structure
1001 **/
1002static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1003{
1004	struct ixgbe_hw *hw = &adapter->hw;
1005	int i;
1006
1007	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1008	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1009	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1010
1011	IXGBE_WRITE_FLUSH(hw);
1012
1013	for (i = 0; i < adapter->num_msix_vectors; i++)
1014		synchronize_irq(adapter->msix_entries[i].vector);
1015}
1016
1017/**
1018 * ixgbevf_irq_enable - Enable default interrupt generation settings
1019 * @adapter: board private structure
1020 **/
1021static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1022{
1023	struct ixgbe_hw *hw = &adapter->hw;
1024
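	/* enable auto-masking (EIAM) and auto-clearing (EIAC) for these
	 * vectors, then unmask them in EIMS
	 */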
1025	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1026	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1027	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1028}
1029
1030/**
1031 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1032 * @adapter: board private structure
1033 *
1034 * Configure the Tx unit of the MAC after a reset.
1035 **/
1036static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1037{
1038	u64 tdba;
1039	struct ixgbe_hw *hw = &adapter->hw;
1040	u32 i, j, tdlen, txctrl;
1041
1042	/* Setup the HW Tx Head and Tail descriptor pointers */
1043	for (i = 0; i < adapter->num_tx_queues; i++) {
1044		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1045		j = ring->reg_idx;
1046		tdba = ring->dma;
1047		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1048		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1049				(tdba & DMA_BIT_MASK(32)));
1050		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1051		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1052		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1053		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1054		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1055		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1056		/* Disable Tx Head Writeback RO bit, since this hoses
1057		 * bookkeeping if things aren't delivered in order.
1058		 */
1059		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1060		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1061		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1062	}
1063}
1064
1065#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
1066
1067static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1068{
1069	struct ixgbevf_ring *rx_ring;
1070	struct ixgbe_hw *hw = &adapter->hw;
1071	u32 srrctl;
1072
1073	rx_ring = &adapter->rx_ring[index];
1074
1075	srrctl = IXGBE_SRRCTL_DROP_EN;
1076
1077	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1078
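	/* BSIZEPKT is programmed in 1 KB granularity, hence the shift */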
1079	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1080		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1081
1082	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1083}
1084
1085static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1086{
1087	struct ixgbe_hw *hw = &adapter->hw;
1088	struct net_device *netdev = adapter->netdev;
1089	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1090	int i;
1091	u16 rx_buf_len;
1092
1093	/* notify the PF of our intent to use this size of frame */
1094	ixgbevf_rlpml_set_vf(hw, max_frame);
1095
1096	/* the PF will allow an extra 4 bytes for VLAN tagged frames */
1097	max_frame += VLAN_HLEN;
1098
1099	/*
1100	 * Allocate buffer sizes that fit well into 32K and
1101	 * take into account max frame size of 9.5K
1102	 */
1103	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1104	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1105		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1106	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1107		rx_buf_len = IXGBEVF_RXBUFFER_2K;
1108	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1109		rx_buf_len = IXGBEVF_RXBUFFER_4K;
1110	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1111		rx_buf_len = IXGBEVF_RXBUFFER_8K;
1112	else
1113		rx_buf_len = IXGBEVF_RXBUFFER_10K;
1114
1115	for (i = 0; i < adapter->num_rx_queues; i++)
1116		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1117}
1118
1119/**
1120 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1121 * @adapter: board private structure
1122 *
1123 * Configure the Rx unit of the MAC after a reset.
1124 **/
1125static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1126{
1127	u64 rdba;
1128	struct ixgbe_hw *hw = &adapter->hw;
1129	int i, j;
1130	u32 rdlen;
1131
1132	/* PSRTYPE must be initialized in 82599 */
1133	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1134
1135	/* set_rx_buffer_len must be called before ring initialization */
1136	ixgbevf_set_rx_buffer_len(adapter);
1137
1138	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1139	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1140	 * the Base and Length of the Rx Descriptor Ring */
1141	for (i = 0; i < adapter->num_rx_queues; i++) {
1142		rdba = adapter->rx_ring[i].dma;
1143		j = adapter->rx_ring[i].reg_idx;
1144		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1145				(rdba & DMA_BIT_MASK(32)));
1146		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1147		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1148		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1149		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1150		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1151		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1152
1153		ixgbevf_configure_srrctl(adapter, j);
1154	}
1155}
1156
1157static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1158				   __be16 proto, u16 vid)
1159{
1160	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1161	struct ixgbe_hw *hw = &adapter->hw;
1162	int err;
1163
1164	spin_lock_bh(&adapter->mbx_lock);
1165
1166	/* add VID to filter table */
1167	err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1168
1169	spin_unlock_bh(&adapter->mbx_lock);
1170
1171	/* translate error return types so error makes sense */
1172	if (err == IXGBE_ERR_MBX)
1173		return -EIO;
1174
1175	if (err == IXGBE_ERR_INVALID_ARGUMENT)
1176		return -EACCES;
1177
1178	set_bit(vid, adapter->active_vlans);
1179
1180	return err;
1181}
1182
1183static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1184				    __be16 proto, u16 vid)
1185{
1186	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1187	struct ixgbe_hw *hw = &adapter->hw;
1188	int err = -EOPNOTSUPP;
1189
1190	spin_lock_bh(&adapter->mbx_lock);
1191
1192	/* remove VID from filter table */
1193	err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1194
1195	spin_unlock_bh(&adapter->mbx_lock);
1196
1197	clear_bit(vid, adapter->active_vlans);
1198
1199	return err;
1200}
1201
1202static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1203{
1204	u16 vid;
1205
1206	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1207		ixgbevf_vlan_rx_add_vid(adapter->netdev,
1208					htons(ETH_P_8021Q), vid);
1209}
1210
1211static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1212{
1213	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1214	struct ixgbe_hw *hw = &adapter->hw;
1215	int count = 0;
1216
1217	if ((netdev_uc_count(netdev)) > 10) {
1218		pr_err("Too many unicast filters - No Space\n");
1219		return -ENOSPC;
1220	}
1221
1222	if (!netdev_uc_empty(netdev)) {
1223		struct netdev_hw_addr *ha;
1224		netdev_for_each_uc_addr(ha, netdev) {
1225			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1226			udelay(200);
1227		}
1228	} else {
1229		/*
1230		 * If the list is empty then send message to PF driver to
1231		 * clear all macvlans on this VF.
1232		 */
1233		hw->mac.ops.set_uc_addr(hw, 0, NULL);
1234	}
1235
1236	return count;
1237}
1238
1239/**
1240 * ixgbevf_set_rx_mode - Multicast and unicast set
1241 * @netdev: network interface device structure
1242 *
1243 * The set_rx_mode entry point is called whenever the multicast address
1244 * list, unicast address list or the network interface flags are updated.
1245 * This routine is responsible for configuring the hardware for proper
1246 * multicast mode and configuring requested unicast filters.
1247 **/
1248static void ixgbevf_set_rx_mode(struct net_device *netdev)
1249{
1250	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1251	struct ixgbe_hw *hw = &adapter->hw;
1252
1253	spin_lock_bh(&adapter->mbx_lock);
1254
1255	/* reprogram multicast list */
1256	hw->mac.ops.update_mc_addr_list(hw, netdev);
1257
1258	ixgbevf_write_uc_addr_list(netdev);
1259
1260	spin_unlock_bh(&adapter->mbx_lock);
1261}
1262
1263static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1264{
1265	int q_idx;
1266	struct ixgbevf_q_vector *q_vector;
1267	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1268
1269	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1270		q_vector = adapter->q_vector[q_idx];
1271		napi_enable(&q_vector->napi);
1272	}
1273}
1274
1275static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1276{
1277	int q_idx;
1278	struct ixgbevf_q_vector *q_vector;
1279	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1280
1281	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1282		q_vector = adapter->q_vector[q_idx];
1283		napi_disable(&q_vector->napi);
1284	}
1285}
1286
1287static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1288{
1289	struct net_device *netdev = adapter->netdev;
1290	int i;
1291
1292	ixgbevf_set_rx_mode(netdev);
1293
1294	ixgbevf_restore_vlan(adapter);
1295
1296	ixgbevf_configure_tx(adapter);
1297	ixgbevf_configure_rx(adapter);
1298	for (i = 0; i < adapter->num_rx_queues; i++) {
1299		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1300		ixgbevf_alloc_rx_buffers(adapter, ring,
1301					 IXGBE_DESC_UNUSED(ring));
1302	}
1303}
1304
1305#define IXGBEVF_MAX_RX_DESC_POLL 10
1306static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1307					 int rxr)
1308{
1309	struct ixgbe_hw *hw = &adapter->hw;
1310	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1311	u32 rxdctl;
1312	int j = adapter->rx_ring[rxr].reg_idx;
1313
1314	do {
1315		usleep_range(1000, 2000);
1316		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1317	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1318
1319	if (!wait_loop)
1320		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
1321		       rxr);
1322
1323	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1324				(adapter->rx_ring[rxr].count - 1));
1325}
1326
1327static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1328				     struct ixgbevf_ring *ring)
1329{
1330	struct ixgbe_hw *hw = &adapter->hw;
1331	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1332	u32 rxdctl;
1333	u8 reg_idx = ring->reg_idx;
1334
1335	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1336	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1337
1338	/* write value back with RXDCTL.ENABLE bit cleared */
1339	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1340
1341	/* the hardware may take up to 100us to really disable the rx queue */
1342	do {
1343		udelay(10);
1344		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1345	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1346
1347	if (!wait_loop)
1348		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
1349		       reg_idx);
1350}
1351
1352static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1353{
1354	/* Only save pre-reset stats if there are some */
1355	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1356		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1357			adapter->stats.base_vfgprc;
1358		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1359			adapter->stats.base_vfgptc;
1360		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1361			adapter->stats.base_vfgorc;
1362		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1363			adapter->stats.base_vfgotc;
1364		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1365			adapter->stats.base_vfmprc;
1366	}
1367}
1368
1369static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1370{
1371	struct ixgbe_hw *hw = &adapter->hw;
1372
1373	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1374	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1375	adapter->stats.last_vfgorc |=
1376		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1377	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1378	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1379	adapter->stats.last_vfgotc |=
1380		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1381	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1382
1383	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1384	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1385	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1386	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1387	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1388}
1389
1390static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1391{
1392	struct ixgbe_hw *hw = &adapter->hw;
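	/* try mailbox API versions from newest to oldest until the PF
	 * accepts one
	 */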
1393	int api[] = { ixgbe_mbox_api_11,
1394		      ixgbe_mbox_api_10,
1395		      ixgbe_mbox_api_unknown };
1396	int err = 0, idx = 0;
1397
1398	spin_lock_bh(&adapter->mbx_lock);
1399
1400	while (api[idx] != ixgbe_mbox_api_unknown) {
1401		err = ixgbevf_negotiate_api_version(hw, api[idx]);
1402		if (!err)
1403			break;
1404		idx++;
1405	}
1406
1407	spin_unlock_bh(&adapter->mbx_lock);
1408}
1409
1410static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1411{
1412	struct net_device *netdev = adapter->netdev;
1413	struct ixgbe_hw *hw = &adapter->hw;
1414	int i, j = 0;
1415	int num_rx_rings = adapter->num_rx_queues;
1416	u32 txdctl, rxdctl;
1417
1418	for (i = 0; i < adapter->num_tx_queues; i++) {
1419		j = adapter->tx_ring[i].reg_idx;
1420		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1421		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
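		/* WTHRESH occupies TXDCTL bits 22:16, hence the shift by 16 */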
1422		txdctl |= (8 << 16);
1423		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1424	}
1425
1426	for (i = 0; i < adapter->num_tx_queues; i++) {
1427		j = adapter->tx_ring[i].reg_idx;
1428		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1429		txdctl |= IXGBE_TXDCTL_ENABLE;
1430		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1431	}
1432
1433	for (i = 0; i < num_rx_rings; i++) {
1434		j = adapter->rx_ring[i].reg_idx;
1435		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1436		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1437		if (hw->mac.type == ixgbe_mac_X540_vf) {
1438			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1439			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1440				   IXGBE_RXDCTL_RLPML_EN);
1441		}
1442		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1443		ixgbevf_rx_desc_queue_enable(adapter, i);
1444	}
1445
1446	ixgbevf_configure_msix(adapter);
1447
1448	spin_lock_bh(&adapter->mbx_lock);
1449
1450	if (is_valid_ether_addr(hw->mac.addr))
1451		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1452	else
1453		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1454
1455	spin_unlock_bh(&adapter->mbx_lock);
1456
1457	clear_bit(__IXGBEVF_DOWN, &adapter->state);
1458	ixgbevf_napi_enable_all(adapter);
1459
1460	/* enable transmits */
1461	netif_tx_start_all_queues(netdev);
1462
1463	ixgbevf_save_reset_stats(adapter);
1464	ixgbevf_init_last_counter_stats(adapter);
1465
1466	hw->mac.get_link_status = 1;
1467	mod_timer(&adapter->watchdog_timer, jiffies);
1468}
1469
1470static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1471{
1472	struct ixgbe_hw *hw = &adapter->hw;
1473	struct ixgbevf_ring *rx_ring;
1474	unsigned int def_q = 0;
1475	unsigned int num_tcs = 0;
1476	unsigned int num_rx_queues = 1;
1477	int err, i;
1478
1479	spin_lock_bh(&adapter->mbx_lock);
1480
1481	/* fetch queue configuration from the PF */
1482	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1483
1484	spin_unlock_bh(&adapter->mbx_lock);
1485
1486	if (err)
1487		return err;
1488
1489	if (num_tcs > 1) {
1490		/* update default Tx ring register index */
1491		adapter->tx_ring[0].reg_idx = def_q;
1492
1493		/* we need as many queues as traffic classes */
1494		num_rx_queues = num_tcs;
1495	}
1496
1497	/* nothing to do if we have the correct number of queues */
1498	if (adapter->num_rx_queues == num_rx_queues)
1499		return 0;
1500
1501	/* allocate new rings */
1502	rx_ring = kcalloc(num_rx_queues,
1503			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
1504	if (!rx_ring)
1505		return -ENOMEM;
1506
1507	/* setup ring fields */
1508	for (i = 0; i < num_rx_queues; i++) {
1509		rx_ring[i].count = adapter->rx_ring_count;
1510		rx_ring[i].queue_index = i;
1511		rx_ring[i].reg_idx = i;
1512		rx_ring[i].dev = &adapter->pdev->dev;
1513		rx_ring[i].netdev = adapter->netdev;
1514
1515		/* allocate resources on the ring */
1516		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1517		if (err) {
1518			while (i) {
1519				i--;
1520				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1521			}
1522			kfree(rx_ring);
1523			return err;
1524		}
1525	}
1526
1527	/* free the existing rings and queues */
1528	ixgbevf_free_all_rx_resources(adapter);
1529	adapter->num_rx_queues = 0;
1530	kfree(adapter->rx_ring);
1531
1532	/* move new rings into position on the adapter struct */
1533	adapter->rx_ring = rx_ring;
1534	adapter->num_rx_queues = num_rx_queues;
1535
1536	/* reset ring to vector mapping */
1537	ixgbevf_reset_q_vectors(adapter);
1538	ixgbevf_map_rings_to_vectors(adapter);
1539
1540	return 0;
1541}
1542
1543void ixgbevf_up(struct ixgbevf_adapter *adapter)
1544{
1545	struct ixgbe_hw *hw = &adapter->hw;
1546
1547	ixgbevf_negotiate_api(adapter);
1548
1549	ixgbevf_reset_queues(adapter);
1550
1551	ixgbevf_configure(adapter);
1552
1553	ixgbevf_up_complete(adapter);
1554
1555	/* clear any pending interrupts, may auto mask */
1556	IXGBE_READ_REG(hw, IXGBE_VTEICR);
1557
1558	ixgbevf_irq_enable(adapter);
1559}
1560
1561/**
1562 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1563 * @adapter: board private structure
1564 * @rx_ring: ring to free buffers from
1565 **/
1566static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1567				  struct ixgbevf_ring *rx_ring)
1568{
1569	struct pci_dev *pdev = adapter->pdev;
1570	unsigned long size;
1571	unsigned int i;
1572
1573	if (!rx_ring->rx_buffer_info)
1574		return;
1575
1576	/* Free all the Rx ring sk_buffs */
1577	for (i = 0; i < rx_ring->count; i++) {
1578		struct ixgbevf_rx_buffer *rx_buffer_info;
1579
1580		rx_buffer_info = &rx_ring->rx_buffer_info[i];
1581		if (rx_buffer_info->dma) {
1582			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1583					 rx_ring->rx_buf_len,
1584					 DMA_FROM_DEVICE);
1585			rx_buffer_info->dma = 0;
1586		}
1587		if (rx_buffer_info->skb) {
1588			struct sk_buff *skb = rx_buffer_info->skb;
1589			rx_buffer_info->skb = NULL;
1590			do {
1591				struct sk_buff *this = skb;
1592				skb = IXGBE_CB(skb)->prev;
1593				dev_kfree_skb(this);
1594			} while (skb);
1595		}
1596	}
1597
1598	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1599	memset(rx_ring->rx_buffer_info, 0, size);
1600
1601	/* Zero out the descriptor ring */
1602	memset(rx_ring->desc, 0, rx_ring->size);
1603
1604	rx_ring->next_to_clean = 0;
1605	rx_ring->next_to_use = 0;
1606
1607	if (rx_ring->head)
1608		writel(0, adapter->hw.hw_addr + rx_ring->head);
1609	if (rx_ring->tail)
1610		writel(0, adapter->hw.hw_addr + rx_ring->tail);
1611}
1612
1613/**
1614 * ixgbevf_clean_tx_ring - Free Tx Buffers
1615 * @adapter: board private structure
1616 * @tx_ring: ring to be cleaned
1617 **/
1618static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1619				  struct ixgbevf_ring *tx_ring)
1620{
1621	struct ixgbevf_tx_buffer *tx_buffer_info;
1622	unsigned long size;
1623	unsigned int i;
1624
1625	if (!tx_ring->tx_buffer_info)
1626		return;
1627
1628	/* Free all the Tx ring sk_buffs */
1629	for (i = 0; i < tx_ring->count; i++) {
1630		tx_buffer_info = &tx_ring->tx_buffer_info[i];
1631		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1632	}
1633
1634	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1635	memset(tx_ring->tx_buffer_info, 0, size);
1636
1637	memset(tx_ring->desc, 0, tx_ring->size);
1638
1639	tx_ring->next_to_use = 0;
1640	tx_ring->next_to_clean = 0;
1641
1642	if (tx_ring->head)
1643		writel(0, adapter->hw.hw_addr + tx_ring->head);
1644	if (tx_ring->tail)
1645		writel(0, adapter->hw.hw_addr + tx_ring->tail);
1646}
1647
1648/**
1649 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1650 * @adapter: board private structure
1651 **/
1652static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1653{
1654	int i;
1655
1656	for (i = 0; i < adapter->num_rx_queues; i++)
1657		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1658}
1659
1660/**
1661 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1662 * @adapter: board private structure
1663 **/
1664static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1665{
1666	int i;
1667
1668	for (i = 0; i < adapter->num_tx_queues; i++)
1669		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1670}
1671
1672void ixgbevf_down(struct ixgbevf_adapter *adapter)
1673{
1674	struct net_device *netdev = adapter->netdev;
1675	struct ixgbe_hw *hw = &adapter->hw;
1676	u32 txdctl;
1677	int i, j;
1678
1679	/* signal that we are down to the interrupt handler */
1680	set_bit(__IXGBEVF_DOWN, &adapter->state);
1681
1682	/* disable all enabled rx queues */
1683	for (i = 0; i < adapter->num_rx_queues; i++)
1684		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
1685
1686	netif_tx_disable(netdev);
1687
1688	msleep(10);
1689
1690	netif_tx_stop_all_queues(netdev);
1691
1692	ixgbevf_irq_disable(adapter);
1693
1694	ixgbevf_napi_disable_all(adapter);
1695
1696	del_timer_sync(&adapter->watchdog_timer);
1697	/* can't call flush scheduled work here because it can deadlock
1698	 * if linkwatch_event tries to acquire the rtnl_lock which we are
1699	 * holding */
1700	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1701		msleep(1);
1702
1703	/* disable transmits in the hardware now that interrupts are off */
1704	for (i = 0; i < adapter->num_tx_queues; i++) {
1705		j = adapter->tx_ring[i].reg_idx;
1706		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1707		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1708				(txdctl & ~IXGBE_TXDCTL_ENABLE));
1709	}
1710
1711	netif_carrier_off(netdev);
1712
1713	if (!pci_channel_offline(adapter->pdev))
1714		ixgbevf_reset(adapter);
1715
1716	ixgbevf_clean_all_tx_rings(adapter);
1717	ixgbevf_clean_all_rx_rings(adapter);
1718}
1719
1720void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1721{
1722	WARN_ON(in_interrupt());
1723
1724	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1725		msleep(1);
1726
1727	ixgbevf_down(adapter);
1728	ixgbevf_up(adapter);
1729
1730	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1731}
1732
1733void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1734{
1735	struct ixgbe_hw *hw = &adapter->hw;
1736	struct net_device *netdev = adapter->netdev;
1737
1738	if (hw->mac.ops.reset_hw(hw))
1739		hw_dbg(hw, "PF still resetting\n");
1740	else
1741		hw->mac.ops.init_hw(hw);
1742
1743	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1744		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1745		       netdev->addr_len);
1746		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1747		       netdev->addr_len);
1748	}
1749}
1750
1751static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1752					int vectors)
1753{
1754	int err = 0;
1755	int vector_threshold;
1756
1757	/* We'll want at least 2 (vector_threshold):
1758	 * 1) TxQ[0] + RxQ[0] handler
1759	 * 2) Other (Link Status Change, etc.)
1760	 */
1761	vector_threshold = MIN_MSIX_COUNT;
1762
1763	/* The more we get, the more we will assign to Tx/Rx Cleanup
1764	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1765	 * Right now, we simply care about how many we'll get; we'll
1766	 * set them up later while requesting irq's.
1767	 */
1768	while (vectors >= vector_threshold) {
1769		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1770				      vectors);
1771		if (!err || err < 0) /* Success or a nasty failure. */
1772			break;
1773		else /* err == number of vectors we should try again with */
1774			vectors = err;
1775	}
1776
1777	if (vectors < vector_threshold)
1778		err = -ENOMEM;
1779
1780	if (err) {
1781		dev_err(&adapter->pdev->dev,
1782			"Unable to allocate MSI-X interrupts\n");
1783		kfree(adapter->msix_entries);
1784		adapter->msix_entries = NULL;
1785	} else {
1786		/*
1787		 * Adjust for only the vectors we'll use, which is minimum
1788		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1789		 * vectors we were allocated.
1790		 */
1791		adapter->num_msix_vectors = vectors;
1792	}
1793
1794	return err;
1795}
1796
1797/**
1798 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1799 * @adapter: board private structure to initialize
1800 *
1801 * This is the top level queue allocation routine.  The order here is very
1802 * important, starting with the "most" number of features turned on at once,
1803 * and ending with the smallest set of features.  This way large combinations
1804 * can be allocated if they're turned on, and smaller combinations are the
1805 * fallthrough conditions.
1806 *
1807 **/
1808static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1809{
1810	/* Start with base case */
1811	adapter->num_rx_queues = 1;
1812	adapter->num_tx_queues = 1;
1813}
1814
1815/**
1816 * ixgbevf_alloc_queues - Allocate memory for all rings
1817 * @adapter: board private structure to initialize
1818 *
1819 * We allocate one ring per queue at run-time since we don't know the
1820 * number of queues at compile-time.
1822 **/
1823static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1824{
1825	int i;
1826
1827	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1828				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
1829	if (!adapter->tx_ring)
1830		goto err_tx_ring_allocation;
1831
1832	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1833				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
1834	if (!adapter->rx_ring)
1835		goto err_rx_ring_allocation;
1836
1837	for (i = 0; i < adapter->num_tx_queues; i++) {
1838		adapter->tx_ring[i].count = adapter->tx_ring_count;
1839		adapter->tx_ring[i].queue_index = i;
1840		/* reg_idx may be remapped later by DCB config */
1841		adapter->tx_ring[i].reg_idx = i;
1842		adapter->tx_ring[i].dev = &adapter->pdev->dev;
1843		adapter->tx_ring[i].netdev = adapter->netdev;
1844	}
1845
1846	for (i = 0; i < adapter->num_rx_queues; i++) {
1847		adapter->rx_ring[i].count = adapter->rx_ring_count;
1848		adapter->rx_ring[i].queue_index = i;
1849		adapter->rx_ring[i].reg_idx = i;
1850		adapter->rx_ring[i].dev = &adapter->pdev->dev;
1851		adapter->rx_ring[i].netdev = adapter->netdev;
1852	}
1853
1854	return 0;
1855
1856err_rx_ring_allocation:
1857	kfree(adapter->tx_ring);
1858err_tx_ring_allocation:
1859	return -ENOMEM;
1860}
1861
1862/**
1863 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1864 * @adapter: board private structure to initialize
1865 *
1866 * Attempt to configure the interrupts using the best available
1867 * capabilities of the hardware and the kernel.
1868 **/
1869static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1870{
1871	struct net_device *netdev = adapter->netdev;
1872	int err = 0;
1873	int vector, v_budget;
1874
1875	/*
1876	 * It's easy to be greedy for MSI-X vectors, but it really
1877	 * doesn't do us much good if we have a lot more vectors
1878	 * than CPUs.  So let's be conservative and only ask for
1879	 * (roughly) the same number of vectors as there are CPUs.
1880	 * The default is to use pairs of vectors.
1881	 */
1882	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1883	v_budget = min_t(int, v_budget, num_online_cpus());
1884	v_budget += NON_Q_VECTORS;
1885
1886	/* A failure in MSI-X entry allocation is fatal for the VF, since
1887	 * MSI-X is the only interrupt mode this adapter supports. */
1888	adapter->msix_entries = kcalloc(v_budget,
1889					sizeof(struct msix_entry), GFP_KERNEL);
1890	if (!adapter->msix_entries) {
1891		err = -ENOMEM;
1892		goto out;
1893	}
1894
1895	for (vector = 0; vector < v_budget; vector++)
1896		adapter->msix_entries[vector].entry = vector;
1897
1898	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1899	if (err)
1900		goto out;
1901
1902	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1903	if (err)
1904		goto out;
1905
1906	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1907
1908out:
1909	return err;
1910}
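
/* Illustrative note (not driver code): with a single Tx and Rx queue and,
 * say, four online CPUs, the budget above works out as
 *
 *	v_budget = max(1, 1);			is 1
 *	v_budget = min_t(int, 1, 4);		still 1
 *	v_budget += NON_Q_VECTORS;		typically 1 for the mailbox
 *
 * so two MSI-X entries are requested: one queue vector and one "other"
 * vector.  NON_Q_VECTORS == 1 is an assumption here; see ixgbevf.h for
 * the authoritative value.
 */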
1911
1912/**
1913 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1914 * @adapter: board private structure to initialize
1915 *
1916 * We allocate one q_vector per queue interrupt.  If allocation fails we
1917 * return -ENOMEM.
1918 **/
1919static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1920{
1921	int q_idx, num_q_vectors;
1922	struct ixgbevf_q_vector *q_vector;
1923
1924	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1925
1926	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1927		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1928		if (!q_vector)
1929			goto err_out;
1930		q_vector->adapter = adapter;
1931		q_vector->v_idx = q_idx;
1932		netif_napi_add(adapter->netdev, &q_vector->napi,
1933			       ixgbevf_poll, 64);
1934		adapter->q_vector[q_idx] = q_vector;
1935	}
1936
1937	return 0;
1938
1939err_out:
1940	while (q_idx) {
1941		q_idx--;
1942		q_vector = adapter->q_vector[q_idx];
1943		netif_napi_del(&q_vector->napi);
1944		kfree(q_vector);
1945		adapter->q_vector[q_idx] = NULL;
1946	}
1947	return -ENOMEM;
1948}
1949
1950/**
1951 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1952 * @adapter: board private structure to initialize
1953 *
1954 * This function frees the memory allocated to the q_vectors.  In addition if
1955 * NAPI is enabled it will delete any references to the NAPI struct prior
1956 * to freeing the q_vector.
1957 **/
1958static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1959{
1960	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1961
1962	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1963		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1964
1965		adapter->q_vector[q_idx] = NULL;
1966		netif_napi_del(&q_vector->napi);
1967		kfree(q_vector);
1968	}
1969}
1970
1971/**
1972 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1973 * @adapter: board private structure
1974 *
1975 **/
1976static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1977{
1978	pci_disable_msix(adapter->pdev);
1979	kfree(adapter->msix_entries);
1980	adapter->msix_entries = NULL;
1981}
1982
1983/**
1984 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1985 * @adapter: board private structure to initialize
1986 *
1987 **/
1988static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1989{
1990	int err;
1991
1992	/* Number of supported queues */
1993	ixgbevf_set_num_queues(adapter);
1994
1995	err = ixgbevf_set_interrupt_capability(adapter);
1996	if (err) {
1997		hw_dbg(&adapter->hw,
1998		       "Unable to setup interrupt capabilities\n");
1999		goto err_set_interrupt;
2000	}
2001
2002	err = ixgbevf_alloc_q_vectors(adapter);
2003	if (err) {
2004		hw_dbg(&adapter->hw,
2005		       "Unable to allocate memory for queue vectors\n");
2006		goto err_alloc_q_vectors;
2007	}
2008
2009	err = ixgbevf_alloc_queues(adapter);
2010	if (err) {
2011		pr_err("Unable to allocate memory for queues\n");
2012		goto err_alloc_queues;
2013	}
2014
2015	hw_dbg(&adapter->hw,
2016	       "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2017	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2018	       adapter->num_rx_queues, adapter->num_tx_queues);
2019
2020	set_bit(__IXGBEVF_DOWN, &adapter->state);
2021
2022	return 0;
2023err_alloc_queues:
2024	ixgbevf_free_q_vectors(adapter);
2025err_alloc_q_vectors:
2026	ixgbevf_reset_interrupt_capability(adapter);
2027err_set_interrupt:
2028	return err;
2029}
2030
2031/**
2032 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2033 * @adapter: board private structure to clear interrupt scheme on
2034 *
2035 * We go through and clear interrupt specific resources and reset the structure
2036 * to pre-load conditions
2037 **/
2038static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2039{
2040	adapter->num_tx_queues = 0;
2041	adapter->num_rx_queues = 0;
2042
2043	ixgbevf_free_q_vectors(adapter);
2044	ixgbevf_reset_interrupt_capability(adapter);
2045}
2046
2047/**
2048 * ixgbevf_sw_init - Initialize general software structures
2049 * (struct ixgbevf_adapter)
2050 * @adapter: board private structure to initialize
2051 *
2052 * ixgbevf_sw_init initializes the Adapter private data structure.
2053 * Fields are initialized based on PCI device information and
2054 * OS network device settings (MTU size).
2055 **/
2056static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2057{
2058	struct ixgbe_hw *hw = &adapter->hw;
2059	struct pci_dev *pdev = adapter->pdev;
2060	struct net_device *netdev = adapter->netdev;
2061	int err;
2062
2063	/* PCI config space info */
2064
2065	hw->vendor_id = pdev->vendor;
2066	hw->device_id = pdev->device;
2067	hw->revision_id = pdev->revision;
2068	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2069	hw->subsystem_device_id = pdev->subsystem_device;
2070
2071	hw->mbx.ops.init_params(hw);
2072
2073	/* assume legacy case in which PF would only give VF 2 queues */
2074	hw->mac.max_tx_queues = 2;
2075	hw->mac.max_rx_queues = 2;
2076
2077	err = hw->mac.ops.reset_hw(hw);
2078	if (err) {
2079		dev_info(&pdev->dev,
2080			 "PF still in reset state.  Is the PF interface up?\n");
2081	} else {
2082		err = hw->mac.ops.init_hw(hw);
2083		if (err) {
2084			pr_err("init_hw failed: %d\n", err);
2085			goto out;
2086		}
2087		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2088		if (err)
2089			dev_info(&pdev->dev, "Error reading MAC address\n");
2090		else if (is_zero_ether_addr(adapter->hw.mac.addr))
2091			dev_info(&pdev->dev,
2092				 "MAC address not assigned by administrator.\n");
2093		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2094	}
2095
2096	if (!is_valid_ether_addr(netdev->dev_addr)) {
2097		dev_info(&pdev->dev, "Assigning random MAC address\n");
2098		eth_hw_addr_random(netdev);
2099		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2100	}
2101
2102	/* lock to protect mailbox accesses */
2103	spin_lock_init(&adapter->mbx_lock);
2104
2105	/* Enable dynamic interrupt throttling rates */
2106	adapter->rx_itr_setting = 1;
2107	adapter->tx_itr_setting = 1;
2108
2109	/* set default ring sizes */
2110	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2111	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2112
2113	set_bit(__IXGBEVF_DOWN, &adapter->state);
2114	return 0;
2115
2116out:
2117	return err;
2118}
2119
2120#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
2121	{							\
2122		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
2123		if (current_counter < last_counter)		\
2124			counter += 0x100000000LL;		\
2125		last_counter = current_counter;			\
2126		counter &= 0xFFFFFFFF00000000LL;		\
2127		counter |= current_counter;			\
2128	}
2129
2130#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2131	{								 \
2132		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
2133		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
2134		u64 current_counter = (current_counter_msb << 32) |      \
2135			current_counter_lsb;                             \
2136		if (current_counter < last_counter)			 \
2137			counter += 0x1000000000LL;			 \
2138		last_counter = current_counter;				 \
2139		counter &= 0xFFFFFFF000000000LL;			 \
2140		counter |= current_counter;				 \
2141	}
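
/* Illustrative example (not driver code) of the 36-bit wrap handling
 * above: if the last snapshot was 0xFFFFFFF00 and the hardware counter
 * has since rolled over to 0x000000100, then current < last, so the
 * running 64-bit total is first advanced by 2^36 (0x1000000000) and its
 * low 36 bits are then replaced with the new hardware value.  The
 * 32-bit variant works the same way with a 2^32 step.
 */
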
2142/**
2143 * ixgbevf_update_stats - Update the board statistics counters.
2144 * @adapter: board private structure
2145 **/
2146void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2147{
2148	struct ixgbe_hw *hw = &adapter->hw;
2149	int i;
2150
2151	if (!adapter->link_up)
2152		return;
2153
2154	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2155				adapter->stats.vfgprc);
2156	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2157				adapter->stats.vfgptc);
2158	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2159				adapter->stats.last_vfgorc,
2160				adapter->stats.vfgorc);
2161	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2162				adapter->stats.last_vfgotc,
2163				adapter->stats.vfgotc);
2164	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2165				adapter->stats.vfmprc);
2166
2167	for (i = 0; i < adapter->num_rx_queues; i++) {
2168		adapter->hw_csum_rx_error +=
2169			adapter->rx_ring[i].hw_csum_rx_error;
2170		adapter->hw_csum_rx_good +=
2171			adapter->rx_ring[i].hw_csum_rx_good;
2172		adapter->rx_ring[i].hw_csum_rx_error = 0;
2173		adapter->rx_ring[i].hw_csum_rx_good = 0;
2174	}
2175}
2176
2177/**
2178 * ixgbevf_watchdog - Timer Call-back
2179 * @data: pointer to adapter cast into an unsigned long
2180 **/
2181static void ixgbevf_watchdog(unsigned long data)
2182{
2183	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2184	struct ixgbe_hw *hw = &adapter->hw;
2185	u32 eics = 0;
2186	int i;
2187
2188	/*
2189	 * Do the watchdog outside of interrupt context due to the lovely
2190	 * delays that some of the newer hardware requires
2191	 */
2192
2193	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2194		goto watchdog_short_circuit;
2195
2196	/* get one bit for every active tx/rx interrupt vector */
2197	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2198		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2199		if (qv->rx.ring || qv->tx.ring)
2200			eics |= 1 << i;
2201	}
2202
2203	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2204
2205watchdog_short_circuit:
2206	schedule_work(&adapter->watchdog_task);
2207}
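
/* Illustrative note (not driver code): with two active queue vectors the
 * loop above builds eics == 0x3.  Writing that mask to VTEICS asks the
 * hardware to fire those MSI-X vectors, so each handler (and its NAPI
 * poll) gets a chance to run even when no traffic interrupt is pending.
 */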
2208
2209/**
2210 * ixgbevf_tx_timeout - Respond to a Tx Hang
2211 * @netdev: network interface device structure
2212 **/
2213static void ixgbevf_tx_timeout(struct net_device *netdev)
2214{
2215	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2216
2217	/* Do the reset outside of interrupt context */
2218	schedule_work(&adapter->reset_task);
2219}
2220
2221static void ixgbevf_reset_task(struct work_struct *work)
2222{
2223	struct ixgbevf_adapter *adapter;
2224	adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2225
2226	/* If we're already down or resetting, just bail */
2227	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2228	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
2229		return;
2230
2231	adapter->tx_timeout_count++;
2232
2233	ixgbevf_reinit_locked(adapter);
2234}
2235
2236/**
2237 * ixgbevf_watchdog_task - worker thread to bring link up
2238 * @work: pointer to work_struct containing our data
2239 **/
2240static void ixgbevf_watchdog_task(struct work_struct *work)
2241{
2242	struct ixgbevf_adapter *adapter = container_of(work,
2243						       struct ixgbevf_adapter,
2244						       watchdog_task);
2245	struct net_device *netdev = adapter->netdev;
2246	struct ixgbe_hw *hw = &adapter->hw;
2247	u32 link_speed = adapter->link_speed;
2248	bool link_up = adapter->link_up;
2249	s32 need_reset;
2250
2251	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2252
2253	/*
2254	 * Always check the link on the watchdog because we have
2255	 * no LSC interrupt
2256	 */
2257	spin_lock_bh(&adapter->mbx_lock);
2258
2259	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2260
2261	spin_unlock_bh(&adapter->mbx_lock);
2262
2263	if (need_reset) {
2264		adapter->link_up = link_up;
2265		adapter->link_speed = link_speed;
2266		netif_carrier_off(netdev);
2267		netif_tx_stop_all_queues(netdev);
2268		schedule_work(&adapter->reset_task);
2269		goto pf_has_reset;
2270	}
2271	adapter->link_up = link_up;
2272	adapter->link_speed = link_speed;
2273
2274	if (link_up) {
2275		if (!netif_carrier_ok(netdev)) {
2276			char *link_speed_string;
2277			switch (link_speed) {
2278			case IXGBE_LINK_SPEED_10GB_FULL:
2279				link_speed_string = "10 Gbps";
2280				break;
2281			case IXGBE_LINK_SPEED_1GB_FULL:
2282				link_speed_string = "1 Gbps";
2283				break;
2284			case IXGBE_LINK_SPEED_100_FULL:
2285				link_speed_string = "100 Mbps";
2286				break;
2287			default:
2288				link_speed_string = "unknown speed";
2289				break;
2290			}
2291			dev_info(&adapter->pdev->dev,
2292				"NIC Link is Up, %s\n", link_speed_string);
2293			netif_carrier_on(netdev);
2294			netif_tx_wake_all_queues(netdev);
2295		}
2296	} else {
2297		adapter->link_up = false;
2298		adapter->link_speed = 0;
2299		if (netif_carrier_ok(netdev)) {
2300			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2301			netif_carrier_off(netdev);
2302			netif_tx_stop_all_queues(netdev);
2303		}
2304	}
2305
2306	ixgbevf_update_stats(adapter);
2307
2308pf_has_reset:
2309	/* Reset the timer */
2310	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2311		mod_timer(&adapter->watchdog_timer,
2312			  round_jiffies(jiffies + (2 * HZ)));
2313
2314	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2315}
2316
2317/**
2318 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2319 * @adapter: board private structure
2320 * @tx_ring: Tx descriptor ring for a specific queue
2321 *
2322 * Free all transmit software resources
2323 **/
2324void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2325			       struct ixgbevf_ring *tx_ring)
2326{
2327	struct pci_dev *pdev = adapter->pdev;
2328
2329	ixgbevf_clean_tx_ring(adapter, tx_ring);
2330
2331	vfree(tx_ring->tx_buffer_info);
2332	tx_ring->tx_buffer_info = NULL;
2333
2334	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2335			  tx_ring->dma);
2336
2337	tx_ring->desc = NULL;
2338}
2339
2340/**
2341 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2342 * @adapter: board private structure
2343 *
2344 * Free all transmit software resources
2345 **/
2346static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2347{
2348	int i;
2349
2350	for (i = 0; i < adapter->num_tx_queues; i++)
2351		if (adapter->tx_ring[i].desc)
2352			ixgbevf_free_tx_resources(adapter,
2353						  &adapter->tx_ring[i]);
2355}
2356
2357/**
2358 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2359 * @adapter: board private structure
2360 * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2361 *
2362 * Return 0 on success, negative on failure
2363 **/
2364int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2365			       struct ixgbevf_ring *tx_ring)
2366{
2367	struct pci_dev *pdev = adapter->pdev;
2368	int size;
2369
2370	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2371	tx_ring->tx_buffer_info = vzalloc(size);
2372	if (!tx_ring->tx_buffer_info)
2373		goto err;
2374
2375	/* round up to nearest 4K */
2376	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2377	tx_ring->size = ALIGN(tx_ring->size, 4096);
2378
2379	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2380					   &tx_ring->dma, GFP_KERNEL);
2381	if (!tx_ring->desc)
2382		goto err;
2383
2384	tx_ring->next_to_use = 0;
2385	tx_ring->next_to_clean = 0;
2386	return 0;
2387
2388err:
2389	vfree(tx_ring->tx_buffer_info);
2390	tx_ring->tx_buffer_info = NULL;
2391	hw_dbg(&adapter->hw,
2392	       "Unable to allocate memory for the transmit descriptor ring\n");
2393	return -ENOMEM;
2394}
2395
2396/**
2397 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2398 * @adapter: board private structure
2399 *
2400 * If this function returns with an error, then it's possible one or
2401 * more of the rings is populated (while the rest are not).  It is the
2402 * caller's duty to clean those orphaned rings.
2403 *
2404 * Return 0 on success, negative on failure
2405 **/
2406static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2407{
2408	int i, err = 0;
2409
2410	for (i = 0; i < adapter->num_tx_queues; i++) {
2411		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2412		if (!err)
2413			continue;
2414		hw_dbg(&adapter->hw,
2415		       "Allocation for Tx Queue %u failed\n", i);
2416		break;
2417	}
2418
2419	return err;
2420}
2421
2422/**
2423 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2424 * @adapter: board private structure
2425 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2426 *
2427 * Returns 0 on success, negative on failure
2428 **/
2429int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2430			       struct ixgbevf_ring *rx_ring)
2431{
2432	struct pci_dev *pdev = adapter->pdev;
2433	int size;
2434
2435	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2436	rx_ring->rx_buffer_info = vzalloc(size);
2437	if (!rx_ring->rx_buffer_info)
2438		goto alloc_failed;
2439
2440	/* Round up to nearest 4K */
2441	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2442	rx_ring->size = ALIGN(rx_ring->size, 4096);
2443
2444	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2445					   &rx_ring->dma, GFP_KERNEL);
2446
2447	if (!rx_ring->desc) {
2448		vfree(rx_ring->rx_buffer_info);
2449		rx_ring->rx_buffer_info = NULL;
2450		goto alloc_failed;
2451	}
2452
2453	rx_ring->next_to_clean = 0;
2454	rx_ring->next_to_use = 0;
2455
2456	return 0;
2457alloc_failed:
2458	return -ENOMEM;
2459}
2460
2461/**
2462 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2463 * @adapter: board private structure
2464 *
2465 * If this function returns with an error, then it's possible one or
2466 * more of the rings is populated (while the rest are not).  It is the
2467 * caller's duty to clean those orphaned rings.
2468 *
2469 * Return 0 on success, negative on failure
2470 **/
2471static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2472{
2473	int i, err = 0;
2474
2475	for (i = 0; i < adapter->num_rx_queues; i++) {
2476		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2477		if (!err)
2478			continue;
2479		hw_dbg(&adapter->hw,
2480		       "Allocation for Rx Queue %u failed\n", i);
2481		break;
2482	}
2483	return err;
2484}
2485
2486/**
2487 * ixgbevf_free_rx_resources - Free Rx Resources
2488 * @adapter: board private structure
2489 * @rx_ring: ring to clean the resources from
2490 *
2491 * Free all receive software resources
2492 **/
2493void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2494			       struct ixgbevf_ring *rx_ring)
2495{
2496	struct pci_dev *pdev = adapter->pdev;
2497
2498	ixgbevf_clean_rx_ring(adapter, rx_ring);
2499
2500	vfree(rx_ring->rx_buffer_info);
2501	rx_ring->rx_buffer_info = NULL;
2502
2503	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2504			  rx_ring->dma);
2505
2506	rx_ring->desc = NULL;
2507}
2508
2509/**
2510 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2511 * @adapter: board private structure
2512 *
2513 * Free all receive software resources
2514 **/
2515static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2516{
2517	int i;
2518
2519	for (i = 0; i < adapter->num_rx_queues; i++)
2520		if (adapter->rx_ring[i].desc)
2521			ixgbevf_free_rx_resources(adapter,
2522						  &adapter->rx_ring[i]);
2523}
2524
2525static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2526{
2527	struct ixgbe_hw *hw = &adapter->hw;
2528	struct ixgbevf_ring *rx_ring;
2529	unsigned int def_q = 0;
2530	unsigned int num_tcs = 0;
2531	unsigned int num_rx_queues = 1;
2532	int err, i;
2533
2534	spin_lock_bh(&adapter->mbx_lock);
2535
2536	/* fetch queue configuration from the PF */
2537	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2538
2539	spin_unlock_bh(&adapter->mbx_lock);
2540
2541	if (err)
2542		return err;
2543
2544	if (num_tcs > 1) {
2545		/* update default Tx ring register index */
2546		adapter->tx_ring[0].reg_idx = def_q;
2547
2548		/* we need as many queues as traffic classes */
2549		num_rx_queues = num_tcs;
2550	}
2551
2552	/* nothing to do if we have the correct number of queues */
2553	if (adapter->num_rx_queues == num_rx_queues)
2554		return 0;
2555
2556	/* allocate new rings */
2557	rx_ring = kcalloc(num_rx_queues,
2558			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
2559	if (!rx_ring)
2560		return -ENOMEM;
2561
2562	/* setup ring fields */
2563	for (i = 0; i < num_rx_queues; i++) {
2564		rx_ring[i].count = adapter->rx_ring_count;
2565		rx_ring[i].queue_index = i;
2566		rx_ring[i].reg_idx = i;
2567		rx_ring[i].dev = &adapter->pdev->dev;
2568		rx_ring[i].netdev = adapter->netdev;
2569	}
2570
2571	/* free the existing ring and queues */
2572	adapter->num_rx_queues = 0;
2573	kfree(adapter->rx_ring);
2574
2575	/* move new rings into position on the adapter struct */
2576	adapter->rx_ring = rx_ring;
2577	adapter->num_rx_queues = num_rx_queues;
2578
2579	return 0;
2580}
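
/* Illustrative note (not driver code): if the PF reports num_tcs = 4 and
 * def_q = 8, the single Tx ring above is remapped to hardware register
 * index 8 and four Rx rings (reg_idx 0..3, one per traffic class) replace
 * the original single Rx ring.  With num_tcs <= 1 nothing changes.
 */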
2581
2582/**
2583 * ixgbevf_open - Called when a network interface is made active
2584 * @netdev: network interface device structure
2585 *
2586 * Returns 0 on success, negative value on failure
2587 *
2588 * The open entry point is called when a network interface is made
2589 * active by the system (IFF_UP).  At this point all resources needed
2590 * for transmit and receive operations are allocated, the interrupt
2591 * handler is registered with the OS, the watchdog timer is started,
2592 * and the stack is notified that the interface is ready.
2593 **/
2594static int ixgbevf_open(struct net_device *netdev)
2595{
2596	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2597	struct ixgbe_hw *hw = &adapter->hw;
2598	int err;
2599
2600	/* A previous failure to open the device because of a lack of
2601	 * available MSIX vector resources may have reset the number
2602	 * of msix vectors variable to zero.  The only way to recover
2603	 * is to unload/reload the driver and hope that the system has
2604	 * been able to recover some MSIX vector resources.
2605	 */
2606	if (!adapter->num_msix_vectors)
2607		return -ENOMEM;
2608
2609	/* disallow open during test */
2610	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2611		return -EBUSY;
2612
2613	if (hw->adapter_stopped) {
2614		ixgbevf_reset(adapter);
2615		/* if the adapter is still stopped then the PF isn't up
2616		 * and the VF can't start */
2617		if (hw->adapter_stopped) {
2618			err = IXGBE_ERR_MBX;
2619			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
2621			goto err_setup_reset;
2622		}
2623	}
2624
2625	ixgbevf_negotiate_api(adapter);
2626
2627	/* setup queue reg_idx and Rx queue count */
2628	err = ixgbevf_setup_queues(adapter);
2629	if (err)
2630		goto err_setup_queues;
2631
2632	/* allocate transmit descriptors */
2633	err = ixgbevf_setup_all_tx_resources(adapter);
2634	if (err)
2635		goto err_setup_tx;
2636
2637	/* allocate receive descriptors */
2638	err = ixgbevf_setup_all_rx_resources(adapter);
2639	if (err)
2640		goto err_setup_rx;
2641
2642	ixgbevf_configure(adapter);
2643
2644	/*
2645	 * Map the Tx/Rx rings to the vectors we were allotted.
2646	 * If request_irq will be called in this function, map_rings
2647	 * must be called *before* up_complete.
2648	 */
2649	ixgbevf_map_rings_to_vectors(adapter);
2650
2651	ixgbevf_up_complete(adapter);
2652
2653	/* clear any pending interrupts, may auto mask */
2654	IXGBE_READ_REG(hw, IXGBE_VTEICR);
2655	err = ixgbevf_request_irq(adapter);
2656	if (err)
2657		goto err_req_irq;
2658
2659	ixgbevf_irq_enable(adapter);
2660
2661	return 0;
2662
2663err_req_irq:
2664	ixgbevf_down(adapter);
2665err_setup_rx:
2666	ixgbevf_free_all_rx_resources(adapter);
2667err_setup_tx:
2668	ixgbevf_free_all_tx_resources(adapter);
2669err_setup_queues:
2670	ixgbevf_reset(adapter);
2671
2672err_setup_reset:
2673
2674	return err;
2675}
2676
2677/**
2678 * ixgbevf_close - Disables a network interface
2679 * @netdev: network interface device structure
2680 *
2681 * Returns 0, this is not allowed to fail
2682 *
2683 * The close entry point is called when an interface is de-activated
2684 * by the OS.  The hardware is still under the drivers control, but
2685 * needs to be disabled.  A global MAC reset is issued to stop the
2686 * hardware, and all transmit and receive resources are freed.
2687 **/
2688static int ixgbevf_close(struct net_device *netdev)
2689{
2690	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2691
2692	ixgbevf_down(adapter);
2693	ixgbevf_free_irq(adapter);
2694
2695	ixgbevf_free_all_tx_resources(adapter);
2696	ixgbevf_free_all_rx_resources(adapter);
2697
2698	return 0;
2699}
2700
2701static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2702				u32 vlan_macip_lens, u32 type_tucmd,
2703				u32 mss_l4len_idx)
2704{
2705	struct ixgbe_adv_tx_context_desc *context_desc;
2706	u16 i = tx_ring->next_to_use;
2707
2708	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2709
2710	i++;
2711	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2712
2713	/* set bits to identify this as an advanced context descriptor */
2714	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2715
2716	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
2717	context_desc->seqnum_seed	= 0;
2718	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
2719	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
2720}
2721
2722static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2723		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2724{
2725	u32 vlan_macip_lens, type_tucmd;
2726	u32 mss_l4len_idx, l4len;
2727
2728	if (!skb_is_gso(skb))
2729		return 0;
2730
2731	if (skb_header_cloned(skb)) {
2732		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2733		if (err)
2734			return err;
2735	}
2736
2737	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2738	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2739
2740	if (skb->protocol == htons(ETH_P_IP)) {
2741		struct iphdr *iph = ip_hdr(skb);
2742		iph->tot_len = 0;
2743		iph->check = 0;
2744		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2745							 iph->daddr, 0,
2746							 IPPROTO_TCP,
2747							 0);
2748		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2749	} else if (skb_is_gso_v6(skb)) {
2750		ipv6_hdr(skb)->payload_len = 0;
2751		tcp_hdr(skb)->check =
2752		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2753				     &ipv6_hdr(skb)->daddr,
2754				     0, IPPROTO_TCP, 0);
2755	}
2756
2757	/* compute header lengths */
2758	l4len = tcp_hdrlen(skb);
2760	*hdr_len = skb_transport_offset(skb) + l4len;
2761
2762	/* mss_l4len_id: use 1 as index for TSO */
2763	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2764	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2765	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2766
2767	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2768	vlan_macip_lens = skb_network_header_len(skb);
2769	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2770	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2771
2772	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2773			    type_tucmd, mss_l4len_idx);
2774
2775	return 1;
2776}
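
/* Illustrative note (not driver code): for a typical IPv4/TCP TSO frame
 * the single context descriptor written above carries the MAC and IP
 * header lengths in vlan_macip_lens, the TCP and IPv4 markers in
 * type_tucmd, and the TCP header length, the MSS taken from gso_size and
 * context index 1 packed into mss_l4len_idx.  The data descriptors that
 * follow then reference context index 1 via olinfo_status (see
 * ixgbevf_tx_queue).
 */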
2777
2778static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2779			    struct sk_buff *skb, u32 tx_flags)
2780{
2781	u32 vlan_macip_lens = 0;
2782	u32 mss_l4len_idx = 0;
2783	u32 type_tucmd = 0;
2784
2785	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2786		u8 l4_hdr = 0;
2787		switch (skb->protocol) {
2788		case __constant_htons(ETH_P_IP):
2789			vlan_macip_lens |= skb_network_header_len(skb);
2790			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2791			l4_hdr = ip_hdr(skb)->protocol;
2792			break;
2793		case __constant_htons(ETH_P_IPV6):
2794			vlan_macip_lens |= skb_network_header_len(skb);
2795			l4_hdr = ipv6_hdr(skb)->nexthdr;
2796			break;
2797		default:
2798			if (unlikely(net_ratelimit())) {
2799				dev_warn(tx_ring->dev,
2800				 "partial checksum but proto=%x!\n",
2801				 skb->protocol);
2802			}
2803			break;
2804		}
2805
2806		switch (l4_hdr) {
2807		case IPPROTO_TCP:
2808			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2809			mss_l4len_idx = tcp_hdrlen(skb) <<
2810					IXGBE_ADVTXD_L4LEN_SHIFT;
2811			break;
2812		case IPPROTO_SCTP:
2813			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2814			mss_l4len_idx = sizeof(struct sctphdr) <<
2815					IXGBE_ADVTXD_L4LEN_SHIFT;
2816			break;
2817		case IPPROTO_UDP:
2818			mss_l4len_idx = sizeof(struct udphdr) <<
2819					IXGBE_ADVTXD_L4LEN_SHIFT;
2820			break;
2821		default:
2822			if (unlikely(net_ratelimit())) {
2823				dev_warn(tx_ring->dev,
2824				 "partial checksum but l4 proto=%x!\n",
2825				 l4_hdr);
2826			}
2827			break;
2828		}
2829	}
2830
2831	/* vlan_macip_lens: MACLEN, VLAN tag */
2832	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2833	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2834
2835	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2836			    type_tucmd, mss_l4len_idx);
2837
2838	return (skb->ip_summed == CHECKSUM_PARTIAL);
2839}
2840
2841static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2842			  struct sk_buff *skb, u32 tx_flags)
2843{
2844	struct ixgbevf_tx_buffer *tx_buffer_info;
2845	unsigned int len;
2846	unsigned int total = skb->len;
2847	unsigned int offset = 0, size;
2848	int count = 0;
2849	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2850	unsigned int f;
2851	int i;
2852
2853	i = tx_ring->next_to_use;
2854
2855	len = min(skb_headlen(skb), total);
2856	while (len) {
2857		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2858		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2859
2860		tx_buffer_info->length = size;
2861		tx_buffer_info->mapped_as_page = false;
2862		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2863						     skb->data + offset,
2864						     size, DMA_TO_DEVICE);
2865		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2866			goto dma_error;
2867
2868		len -= size;
2869		total -= size;
2870		offset += size;
2871		count++;
2872		i++;
2873		if (i == tx_ring->count)
2874			i = 0;
2875	}
2876
2877	for (f = 0; f < nr_frags; f++) {
2878		const struct skb_frag_struct *frag;
2879
2880		frag = &skb_shinfo(skb)->frags[f];
2881		len = min((unsigned int)skb_frag_size(frag), total);
2882		offset = 0;
2883
2884		while (len) {
2885			tx_buffer_info = &tx_ring->tx_buffer_info[i];
2886			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2887
2888			tx_buffer_info->length = size;
2889			tx_buffer_info->dma =
2890				skb_frag_dma_map(tx_ring->dev, frag,
2891						 offset, size, DMA_TO_DEVICE);
2892			if (dma_mapping_error(tx_ring->dev,
2893					      tx_buffer_info->dma))
2894				goto dma_error;
2895			tx_buffer_info->mapped_as_page = true;
2896
2897			len -= size;
2898			total -= size;
2899			offset += size;
2900			count++;
2901			i++;
2902			if (i == tx_ring->count)
2903				i = 0;
2904		}
2905		if (total == 0)
2906			break;
2907	}
2908
2909	if (i == 0)
2910		i = tx_ring->count - 1;
2911	else
2912		i = i - 1;
2913	tx_ring->tx_buffer_info[i].skb = skb;
2914
2915	return count;
2916
2917dma_error:
2918	dev_err(tx_ring->dev, "TX DMA map failed\n");
2919
2920	/* clear timestamp and dma mappings for failed tx_buffer_info map */
2921	tx_buffer_info->dma = 0;
2922	count--;
2923
2924	/* clear timestamp and dma mappings for remaining portion of packet */
2925	while (count >= 0) {
2926		count--;
2927		i--;
2928		if (i < 0)
2929			i += tx_ring->count;
2930		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2931		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2932	}
2933
2934	return count;
2935}
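
/* Illustrative note (not driver code): each mapped buffer above is capped
 * at IXGBE_MAX_DATA_PER_TXD bytes.  Assuming the usual 16KB cap, a 20KB
 * page fragment is mapped as two buffers (16KB + 4KB) and therefore
 * consumes two data descriptors; the returned count reflects that.
 */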
2936
2937static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2938			     int count, unsigned int first, u32 paylen,
2939			     u8 hdr_len)
2940{
2941	union ixgbe_adv_tx_desc *tx_desc = NULL;
2942	struct ixgbevf_tx_buffer *tx_buffer_info;
2943	u32 olinfo_status = 0, cmd_type_len = 0;
2944	unsigned int i;
2945
2946	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2947
2948	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2949
2950	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2951
2952	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2953		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2954
2955	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2956		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2957
2958	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2959		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2960
2961		/* use index 1 context for tso */
2962		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2963		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2964			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2965	}
2966
2967	/*
2968	 * Check Context must be set if Tx switch is enabled, which it
2969	 * always is when virtual functions are running
2970	 */
2971	olinfo_status |= IXGBE_ADVTXD_CC;
2972
2973	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2974
2975	i = tx_ring->next_to_use;
2976	while (count--) {
2977		tx_buffer_info = &tx_ring->tx_buffer_info[i];
2978		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2979		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2980		tx_desc->read.cmd_type_len =
2981			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2982		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2983		i++;
2984		if (i == tx_ring->count)
2985			i = 0;
2986	}
2987
2988	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2989
2990	tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2991
2992	/* Force memory writes to complete before letting h/w
2993	 * know there are new descriptors to fetch.  (Only
2994	 * applicable for weak-ordered memory model archs,
2995	 * such as IA-64).
2996	 */
2997	wmb();
2998
2999	tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
3000	tx_ring->next_to_use = i;
3001}
3002
3003static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3004{
3005	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3006
3007	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3008	/* Herbert's original patch had:
3009	 *  smp_mb__after_netif_stop_queue();
3010	 * but since that doesn't exist yet, just open code it. */
3011	smp_mb();
3012
3013	/* We need to check again in case another CPU has just
3014	 * made room available. */
3015	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3016		return -EBUSY;
3017
3018	/* A reprieve! - use start_queue because it doesn't call schedule */
3019	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3020	++adapter->restart_queue;
3021	return 0;
3022}
3023
3024static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3025{
3026	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3027		return 0;
3028	return __ixgbevf_maybe_stop_tx(tx_ring, size);
3029}
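
/* Illustrative note (not driver code): the stop/re-check dance above closes
 * a race with the Tx cleanup path.  If the cleanup handler frees descriptors
 * between the first space check and netif_stop_subqueue(), the smp_mb()
 * guarantees the re-check sees that update and the queue is restarted
 * immediately instead of being left stopped with room available.
 */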
3030
3031static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3032{
3033	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3034	struct ixgbevf_ring *tx_ring;
3035	unsigned int first;
3036	unsigned int tx_flags = 0;
3037	u8 hdr_len = 0;
3038	int r_idx = 0, tso;
3039	u16 count = TXD_USE_COUNT(skb_headlen(skb));
3040#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3041	unsigned short f;
3042#endif
3043	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3044	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3045		dev_kfree_skb(skb);
3046		return NETDEV_TX_OK;
3047	}
3048
3049	tx_ring = &adapter->tx_ring[r_idx];
3050
3051	/*
3052	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3053	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3054	 *       + 2 desc gap to keep tail from touching head,
3055	 *       + 1 desc for context descriptor,
3056	 * otherwise try next time
3057	 */
3058#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3059	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3060		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3061#else
3062	count += skb_shinfo(skb)->nr_frags;
3063#endif
3064	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3065		adapter->tx_busy++;
3066		return NETDEV_TX_BUSY;
3067	}
3068
3069	if (vlan_tx_tag_present(skb)) {
3070		tx_flags |= vlan_tx_tag_get(skb);
3071		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3072		tx_flags |= IXGBE_TX_FLAGS_VLAN;
3073	}
3074
3075	first = tx_ring->next_to_use;
3076
3077	if (skb->protocol == htons(ETH_P_IP))
3078		tx_flags |= IXGBE_TX_FLAGS_IPV4;
3079	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3080	if (tso < 0) {
3081		dev_kfree_skb_any(skb);
3082		return NETDEV_TX_OK;
3083	}
3084
3085	if (tso)
3086		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3087	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3088		tx_flags |= IXGBE_TX_FLAGS_CSUM;
3089
3090	ixgbevf_tx_queue(tx_ring, tx_flags,
3091			 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3092			 first, skb->len, hdr_len);
3093
3094	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3095
3096	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3097
3098	return NETDEV_TX_OK;
3099}
3100
3101/**
3102 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3103 * @netdev: network interface device structure
3104 * @p: pointer to an address structure
3105 *
3106 * Returns 0 on success, negative on failure
3107 **/
3108static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3109{
3110	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3111	struct ixgbe_hw *hw = &adapter->hw;
3112	struct sockaddr *addr = p;
3113
3114	if (!is_valid_ether_addr(addr->sa_data))
3115		return -EADDRNOTAVAIL;
3116
3117	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3118	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3119
3120	spin_lock_bh(&adapter->mbx_lock);
3121
3122	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3123
3124	spin_unlock_bh(&adapter->mbx_lock);
3125
3126	return 0;
3127}
3128
3129/**
3130 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3131 * @netdev: network interface device structure
3132 * @new_mtu: new value for maximum frame size
3133 *
3134 * Returns 0 on success, negative on failure
3135 **/
3136static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3137{
3138	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3139	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3140	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3141
3142	switch (adapter->hw.api_version) {
3143	case ixgbe_mbox_api_11:
3144		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3145		break;
3146	default:
3147		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3148			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3149		break;
3150	}
3151
3152	/* MTU < 68 is an error and causes problems on some kernels */
3153	if ((new_mtu < 68) || (max_frame > max_possible_frame))
3154		return -EINVAL;
3155
3156	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3157	       netdev->mtu, new_mtu);
3158	/* must set new MTU before calling down or up */
3159	netdev->mtu = new_mtu;
3160
3161	if (netif_running(netdev))
3162		ixgbevf_reinit_locked(adapter);
3163
3164	return 0;
3165}
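
/* Illustrative note (not driver code): with the default 1.0 mailbox API on
 * an 82599 VF, max_possible_frame stays at MAXIMUM_ETHERNET_VLAN_SIZE, so
 * only standard MTUs are accepted (e.g. new_mtu = 1500 gives max_frame =
 * 1500 + ETH_HLEN + ETH_FCS_LEN = 1518).  With API 1.1 or an X540 VF the
 * limit is raised to IXGBE_MAX_JUMBO_FRAME_SIZE and jumbo MTUs pass the
 * check.
 */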
3166
3167static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3168{
3169	struct net_device *netdev = pci_get_drvdata(pdev);
3170	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3171#ifdef CONFIG_PM
3172	int retval = 0;
3173#endif
3174
3175	netif_device_detach(netdev);
3176
3177	if (netif_running(netdev)) {
3178		rtnl_lock();
3179		ixgbevf_down(adapter);
3180		ixgbevf_free_irq(adapter);
3181		ixgbevf_free_all_tx_resources(adapter);
3182		ixgbevf_free_all_rx_resources(adapter);
3183		rtnl_unlock();
3184	}
3185
3186	ixgbevf_clear_interrupt_scheme(adapter);
3187
3188#ifdef CONFIG_PM
3189	retval = pci_save_state(pdev);
3190	if (retval)
3191		return retval;
3192
3193#endif
3194	pci_disable_device(pdev);
3195
3196	return 0;
3197}
3198
3199#ifdef CONFIG_PM
3200static int ixgbevf_resume(struct pci_dev *pdev)
3201{
3202	struct net_device *netdev = pci_get_drvdata(pdev);
3203	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3204	int err;
3205
3206	pci_set_power_state(pdev, PCI_D0);
3207	pci_restore_state(pdev);
3208	/*
3209	 * pci_restore_state clears dev->state_saved so call
3210	 * pci_save_state to restore it.
3211	 */
3212	pci_save_state(pdev);
3213
3214	err = pci_enable_device_mem(pdev);
3215	if (err) {
3216		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3217		return err;
3218	}
3219	pci_set_master(pdev);
3220
3221	rtnl_lock();
3222	err = ixgbevf_init_interrupt_scheme(adapter);
3223	rtnl_unlock();
3224	if (err) {
3225		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3226		return err;
3227	}
3228
3229	ixgbevf_reset(adapter);
3230
3231	if (netif_running(netdev)) {
3232		err = ixgbevf_open(netdev);
3233		if (err)
3234			return err;
3235	}
3236
3237	netif_device_attach(netdev);
3238
3239	return err;
3240}
3241
3242#endif /* CONFIG_PM */
3243static void ixgbevf_shutdown(struct pci_dev *pdev)
3244{
3245	ixgbevf_suspend(pdev, PMSG_SUSPEND);
3246}
3247
3248static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3249						struct rtnl_link_stats64 *stats)
3250{
3251	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3252	unsigned int start;
3253	u64 bytes, packets;
3254	const struct ixgbevf_ring *ring;
3255	int i;
3256
3257	ixgbevf_update_stats(adapter);
3258
3259	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3260
3261	for (i = 0; i < adapter->num_rx_queues; i++) {
3262		ring = &adapter->rx_ring[i];
3263		do {
3264			start = u64_stats_fetch_begin_bh(&ring->syncp);
3265			bytes = ring->total_bytes;
3266			packets = ring->total_packets;
3267		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3268		stats->rx_bytes += bytes;
3269		stats->rx_packets += packets;
3270	}
3271
3272	for (i = 0; i < adapter->num_tx_queues; i++) {
3273		ring = &adapter->tx_ring[i];
3274		do {
3275			start = u64_stats_fetch_begin_bh(&ring->syncp);
3276			bytes = ring->total_bytes;
3277			packets = ring->total_packets;
3278		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3279		stats->tx_bytes += bytes;
3280		stats->tx_packets += packets;
3281	}
3282
3283	return stats;
3284}
3285
3286static const struct net_device_ops ixgbevf_netdev_ops = {
3287	.ndo_open		= ixgbevf_open,
3288	.ndo_stop		= ixgbevf_close,
3289	.ndo_start_xmit		= ixgbevf_xmit_frame,
3290	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
3291	.ndo_get_stats64	= ixgbevf_get_stats,
3292	.ndo_validate_addr	= eth_validate_addr,
3293	.ndo_set_mac_address	= ixgbevf_set_mac,
3294	.ndo_change_mtu		= ixgbevf_change_mtu,
3295	.ndo_tx_timeout		= ixgbevf_tx_timeout,
3296	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
3297	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
3298};
3299
3300static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3301{
3302	dev->netdev_ops = &ixgbevf_netdev_ops;
3303	ixgbevf_set_ethtool_ops(dev);
3304	dev->watchdog_timeo = 5 * HZ;
3305}
3306
3307/**
3308 * ixgbevf_probe - Device Initialization Routine
3309 * @pdev: PCI device information struct
3310 * @ent: entry in ixgbevf_pci_tbl
3311 *
3312 * Returns 0 on success, negative on failure
3313 *
3314 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3315 * The OS initialization, configuring of the adapter private structure,
3316 * and a hardware reset occur.
3317 **/
3318static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3319{
3320	struct net_device *netdev;
3321	struct ixgbevf_adapter *adapter = NULL;
3322	struct ixgbe_hw *hw = NULL;
3323	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3324	static int cards_found;
3325	int err, pci_using_dac;
3326
3327	err = pci_enable_device(pdev);
3328	if (err)
3329		return err;
3330
3331	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3332	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3333		pci_using_dac = 1;
3334	} else {
3335		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3336		if (err) {
3337			err = dma_set_coherent_mask(&pdev->dev,
3338						    DMA_BIT_MASK(32));
3339			if (err) {
3340				dev_err(&pdev->dev, "No usable DMA "
3341					"configuration, aborting\n");
3342				goto err_dma;
3343			}
3344		}
3345		pci_using_dac = 0;
3346	}
3347
3348	err = pci_request_regions(pdev, ixgbevf_driver_name);
3349	if (err) {
3350		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3351		goto err_pci_reg;
3352	}
3353
3354	pci_set_master(pdev);
3355
3356	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3357				   MAX_TX_QUEUES);
3358	if (!netdev) {
3359		err = -ENOMEM;
3360		goto err_alloc_etherdev;
3361	}
3362
3363	SET_NETDEV_DEV(netdev, &pdev->dev);
3364
3365	pci_set_drvdata(pdev, netdev);
3366	adapter = netdev_priv(netdev);
3367
3368	adapter->netdev = netdev;
3369	adapter->pdev = pdev;
3370	hw = &adapter->hw;
3371	hw->back = adapter;
3372	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3373
3374	/*
3375	 * call save state here in standalone driver because it relies on
3376	 * adapter struct to exist, and needs to call netdev_priv
3377	 */
3378	pci_save_state(pdev);
3379
3380	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3381			      pci_resource_len(pdev, 0));
3382	if (!hw->hw_addr) {
3383		err = -EIO;
3384		goto err_ioremap;
3385	}
3386
3387	ixgbevf_assign_netdev_ops(netdev);
3388
3389	adapter->bd_number = cards_found;
3390
3391	/* Setup hw api */
3392	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3393	hw->mac.type  = ii->mac;
3394
3395	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3396	       sizeof(struct ixgbe_mbx_operations));
3397
3398	/* setup the private structure */
3399	err = ixgbevf_sw_init(adapter);
3400	if (err)
3401		goto err_sw_init;
3402
3403	/* The HW MAC address was set and/or determined in sw_init */
3404	if (!is_valid_ether_addr(netdev->dev_addr)) {
3405		pr_err("invalid MAC address\n");
3406		err = -EIO;
3407		goto err_sw_init;
3408	}
3409
3410	netdev->hw_features = NETIF_F_SG |
3411			   NETIF_F_IP_CSUM |
3412			   NETIF_F_IPV6_CSUM |
3413			   NETIF_F_TSO |
3414			   NETIF_F_TSO6 |
3415			   NETIF_F_RXCSUM;
3416
3417	netdev->features = netdev->hw_features |
3418			   NETIF_F_HW_VLAN_CTAG_TX |
3419			   NETIF_F_HW_VLAN_CTAG_RX |
3420			   NETIF_F_HW_VLAN_CTAG_FILTER;
3421
3422	netdev->vlan_features |= NETIF_F_TSO;
3423	netdev->vlan_features |= NETIF_F_TSO6;
3424	netdev->vlan_features |= NETIF_F_IP_CSUM;
3425	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3426	netdev->vlan_features |= NETIF_F_SG;
3427
3428	if (pci_using_dac)
3429		netdev->features |= NETIF_F_HIGHDMA;
3430
3431	netdev->priv_flags |= IFF_UNICAST_FLT;
3432
3433	init_timer(&adapter->watchdog_timer);
3434	adapter->watchdog_timer.function = ixgbevf_watchdog;
3435	adapter->watchdog_timer.data = (unsigned long)adapter;
3436
3437	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3438	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3439
3440	err = ixgbevf_init_interrupt_scheme(adapter);
3441	if (err)
3442		goto err_sw_init;
3443
3444	strcpy(netdev->name, "eth%d");
3445
3446	err = register_netdev(netdev);
3447	if (err)
3448		goto err_register;
3449
3450	netif_carrier_off(netdev);
3451
3452	ixgbevf_init_last_counter_stats(adapter);
3453
3454	/* print the MAC address */
3455	hw_dbg(hw, "%pM\n", netdev->dev_addr);
3456
3457	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3458
3459	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3460	cards_found++;
3461	return 0;
3462
3463err_register:
3464	ixgbevf_clear_interrupt_scheme(adapter);
3465err_sw_init:
3466	ixgbevf_reset_interrupt_capability(adapter);
3467	iounmap(hw->hw_addr);
3468err_ioremap:
3469	free_netdev(netdev);
3470err_alloc_etherdev:
3471	pci_release_regions(pdev);
3472err_pci_reg:
3473err_dma:
3474	pci_disable_device(pdev);
3475	return err;
3476}
3477
3478/**
3479 * ixgbevf_remove - Device Removal Routine
3480 * @pdev: PCI device information struct
3481 *
3482 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3483 * that it should release a PCI device.  This could be caused by a
3484 * Hot-Plug event, or because the driver is going to be removed from
3485 * memory.
3486 **/
3487static void ixgbevf_remove(struct pci_dev *pdev)
3488{
3489	struct net_device *netdev = pci_get_drvdata(pdev);
3490	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3491
3492	set_bit(__IXGBEVF_DOWN, &adapter->state);
3493
3494	del_timer_sync(&adapter->watchdog_timer);
3495
3496	cancel_work_sync(&adapter->reset_task);
3497	cancel_work_sync(&adapter->watchdog_task);
3498
3499	if (netdev->reg_state == NETREG_REGISTERED)
3500		unregister_netdev(netdev);
3501
3502	ixgbevf_clear_interrupt_scheme(adapter);
3503	ixgbevf_reset_interrupt_capability(adapter);
3504
3505	iounmap(adapter->hw.hw_addr);
3506	pci_release_regions(pdev);
3507
3508	hw_dbg(&adapter->hw, "Remove complete\n");
3509
3510	kfree(adapter->tx_ring);
3511	kfree(adapter->rx_ring);
3512
3513	free_netdev(netdev);
3514
3515	pci_disable_device(pdev);
3516}
3517
3518/**
3519 * ixgbevf_io_error_detected - called when PCI error is detected
3520 * @pdev: Pointer to PCI device
3521 * @state: The current pci connection state
3522 *
3523 * This function is called after a PCI bus error affecting
3524 * this device has been detected.
3525 */
3526static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3527						  pci_channel_state_t state)
3528{
3529	struct net_device *netdev = pci_get_drvdata(pdev);
3530	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3531
3532	netif_device_detach(netdev);
3533
3534	if (state == pci_channel_io_perm_failure)
3535		return PCI_ERS_RESULT_DISCONNECT;
3536
3537	if (netif_running(netdev))
3538		ixgbevf_down(adapter);
3539
3540	pci_disable_device(pdev);
3541
3542	/* Request a slot reset. */
3543	return PCI_ERS_RESULT_NEED_RESET;
3544}
3545
3546/**
3547 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3548 * @pdev: Pointer to PCI device
3549 *
3550 * Restart the card from scratch, as if from a cold-boot. Implementation
3551 * resembles the first-half of the ixgbevf_resume routine.
3552 */
3553static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3554{
3555	struct net_device *netdev = pci_get_drvdata(pdev);
3556	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3557
3558	if (pci_enable_device_mem(pdev)) {
3559		dev_err(&pdev->dev,
3560			"Cannot re-enable PCI device after reset.\n");
3561		return PCI_ERS_RESULT_DISCONNECT;
3562	}
3563
3564	pci_set_master(pdev);
3565
3566	ixgbevf_reset(adapter);
3567
3568	return PCI_ERS_RESULT_RECOVERED;
3569}
3570
3571/**
3572 * ixgbevf_io_resume - called when traffic can start flowing again.
3573 * @pdev: Pointer to PCI device
3574 *
3575 * This callback is called when the error recovery driver tells us that
3576 * it's OK to resume normal operation. Implementation resembles the
3577 * second-half of the ixgbevf_resume routine.
3578 */
3579static void ixgbevf_io_resume(struct pci_dev *pdev)
3580{
3581	struct net_device *netdev = pci_get_drvdata(pdev);
3582	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3583
3584	if (netif_running(netdev))
3585		ixgbevf_up(adapter);
3586
3587	netif_device_attach(netdev);
3588}
3589
3590/* PCI Error Recovery (ERS) */
3591static const struct pci_error_handlers ixgbevf_err_handler = {
3592	.error_detected = ixgbevf_io_error_detected,
3593	.slot_reset = ixgbevf_io_slot_reset,
3594	.resume = ixgbevf_io_resume,
3595};
3596
3597static struct pci_driver ixgbevf_driver = {
3598	.name     = ixgbevf_driver_name,
3599	.id_table = ixgbevf_pci_tbl,
3600	.probe    = ixgbevf_probe,
3601	.remove   = ixgbevf_remove,
3602#ifdef CONFIG_PM
3603	/* Power Management Hooks */
3604	.suspend  = ixgbevf_suspend,
3605	.resume   = ixgbevf_resume,
3606#endif
3607	.shutdown = ixgbevf_shutdown,
3608	.err_handler = &ixgbevf_err_handler
3609};
3610
3611/**
3612 * ixgbevf_init_module - Driver Registration Routine
3613 *
3614 * ixgbevf_init_module is the first routine called when the driver is
3615 * loaded. All it does is register with the PCI subsystem.
3616 **/
3617static int __init ixgbevf_init_module(void)
3618{
3619	int ret;
3620	pr_info("%s - version %s\n", ixgbevf_driver_string,
3621		ixgbevf_driver_version);
3622
3623	pr_info("%s\n", ixgbevf_copyright);
3624
3625	ret = pci_register_driver(&ixgbevf_driver);
3626	return ret;
3627}
3628
3629module_init(ixgbevf_init_module);
3630
3631/**
3632 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3633 *
3634 * ixgbevf_exit_module is called just before the driver is removed
3635 * from memory.
3636 **/
3637static void __exit ixgbevf_exit_module(void)
3638{
3639	pci_unregister_driver(&ixgbevf_driver);
3640}
3641
3642#ifdef DEBUG
3643/**
3644 * ixgbevf_get_hw_dev_name - return device name string
3645 * used by hardware layer to print debugging information
3646 **/
3647char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3648{
3649	struct ixgbevf_adapter *adapter = hw->back;
3650	return adapter->netdev->name;
3651}
3652
3653#endif
3654module_exit(ixgbevf_exit_module);
3655
3656/* ixgbevf_main.c */
3657