qlcnic_io.c revision 4a99ab56cea66f9f67b9d07ace5cd40a336c8e6f
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>

#include "qlcnic.h"

#define TX_ETHER_PKT	0x01
#define TX_TCP_PKT	0x02
#define TX_UDP_PKT	0x03
#define TX_IP_PKT	0x04
#define TX_TCP_LSO	0x05
#define TX_TCP_LSO6	0x06
#define TX_TCPV6_PKT	0x0b
#define TX_UDPV6_PKT	0x0c
#define FLAGS_VLAN_TAGGED	0x10
#define FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)		\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)		\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)		\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TX_POLL_BUDGET		128
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
				     struct qlcnic_host_rds_ring *, u16, u16);

inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
				       struct qlcnic_host_tx_ring *tx_ring)
{
	writel(0, tx_ring->crb_intr_mask);
}

inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
					struct qlcnic_host_tx_ring *tx_ring)
{
	writel(1, tx_ring->crb_intr_mask);
}

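/* Fold the lowest and highest bytes of a MAC address into one byte;
 * the result is masked with the bucket count to index the driver's
 * MAC filter hash tables.
 */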
static inline u8 qlcnic_mac_hash(u64 mac)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
}

static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	unsigned short device = adapter->pdev->device;

	if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
	    (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
		return handle | (ring_id << 15);
	else
		return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

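/* Track MAC addresses learned from loopback traffic. For a loopback
 * packet, cache the source MAC in the Rx filter hash (refreshing aged
 * entries). For a regular packet whose source MAC was learned that way,
 * re-add and then delete the address in the SRE and drop the hash entry.
 */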
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
			  int loopback_pkt, u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *n;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, found = 0, op;
	int ret;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		hindex = qlcnic_mac_hash(src_addr) &
			 (adapter->fhash.fbucket_size - 1);
		head = &(adapter->rx_fhash.fhead[hindex]);

		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
			if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
			    tmp_fil->vlan_id == vlan_id) {
				time = tmp_fil->ftime;
				if (jiffies > (QLCNIC_READD_AGE * HZ + time))
					tmp_fil->ftime = jiffies;
				return;
			}
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		hindex = qlcnic_mac_hash(src_addr) &
			 (adapter->fhash.fbucket_size - 1);
		head = &(adapter->rx_fhash.fhead[hindex]);
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
			if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
			    tmp_fil->vlan_id == vlan_id) {
				found = 1;
				break;
			}
		}

		if (!found) {
			spin_unlock(&adapter->rx_mac_learn_lock);
			return;
		}

		op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
		ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
						vlan_id, op);
		if (!ret) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&(tmp_fil->fnode));
				adapter->rx_fhash.fnum--;
			}
		}
		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}

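/* Post a MAC-learn request to the firmware by consuming one command
 * descriptor on the Tx ring; the caller is responsible for ringing the
 * Tx doorbell afterwards.
 */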
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

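/* Driver-side MAC learning on transmit: remember the packet's source MAC
 * in the filter hash and program it into hardware, re-programming entries
 * that have aged past QLCNIC_READD_AGE.
 */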
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *n;
	struct hlist_head *head;
	struct net_device *netdev = adapter->netdev;
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
		adapter->stats.mac_filter_limit_overrun++;
		netdev_info(netdev, "Can not add more than %d mac addresses\n",
			    adapter->fhash.fmax);
		return;
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

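/* Fill the protocol portion of the first Tx descriptor: VLAN tag handling,
 * checksum-offload/LSO opcode selection and, for LSO, a copy of the
 * MAC/IP/TCP headers into the descriptor ring for the firmware.
 */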
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->tx_pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring
		 */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

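/* DMA-map the skb head and all page fragments into pbuf->frag_array;
 * on failure, unwind every mapping created so far.
 */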
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

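/* Clear only the command-descriptor words that are not rewritten on
 * every transmit, instead of zeroing the whole 64-byte descriptor.
 */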
static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}

netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_start_queue(netdev);
		} else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc. */
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
		goto unwind_buff;

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else if (!adapter->ahw->linkup && linkup) {
		netdev_info(netdev, "NIC Link is up\n");
		adapter->ahw->linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
}

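/* Allocate and DMA-map a receive skb for the given RDS ring buffer. */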
static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

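/* Refill the RDS ring from its free list and publish the new producer
 * index. A trylock is used so the Rx softirq path never spins against a
 * concurrent refill.
 */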
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}

static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;
		smp_mb();
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}

static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
	    lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
			break;
		}
		break;
	default:
		break;
	}
}

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_rds_ring *ring,
				     u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

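/* Strip an outer VLAN header, if present, and apply the PVID rules:
 * returns 0 with *vlan_tag set (0xffff means "take the untagged path"),
 * or -EINVAL if the packet must be dropped.
 */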
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->rx_pvid)
		return 0;

	if (*vlan_tag == adapter->rx_pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE            20
#define QLC_TCP_TS_OPTION_SIZE      12
#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

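/* Handle a firmware-aggregated LRO packet: restore the IP/TCP headers
 * (length, checksum, sequence number, PSH bit) so the stack sees one
 * large, well-formed TCP segment.
 */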
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

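/* Main Rx completion loop: walk host-owned status descriptors, dispatch
 * by opcode, hand descriptors back to the firmware, then replenish the
 * RDS rings.
 */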
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);
		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fall through */
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}

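/* Hex-dump a received packet via the driver debug macro; used when a
 * diagnostic loopback frame fails validation.
 */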
static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	pr_info("\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			pr_info("\n");
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}

int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring, max_sds_rings;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	max_sds_rings = adapter->max_sds_rings;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       QLCNIC_NETDEV_WEIGHT * 2);
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	return 0;
}

void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
	qlcnic_free_tx_rings(adapter);
}

void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

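/* On 83xx the loopback indication is a single status bit, at a different
 * position for regular and LRO completions.
 */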
static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum  = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}
	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
	return buffer;
}

static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}
	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}
	return count;
}

static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;

	budget = QLCNIC_TX_POLL_BUDGET;
	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;
	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
	}

	return work_done;
}

static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			napi_enable(&tx_ring->napi);
			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
		}
	}
}

void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			qlcnic_83xx_disable_intr(adapter, sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
			napi_synchronize(&tx_ring->napi);
			napi_disable(&tx_ring->napi);
		}
	}
}

int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring, max_sds_rings, temp;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	max_sds_rings = adapter->max_sds_rings;
	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_rx_poll,
					       QLCNIC_NETDEV_WEIGHT * 2);
			} else {
				temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
				netif_napi_add(netdev, &sds_ring->napi,
					       qlcnic_83xx_msix_sriov_vf_poll,
					       temp);
			}

		} else {
			netif_napi_add(netdev, &sds_ring->napi,
				       qlcnic_83xx_poll,
				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
		}
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_add(netdev, &tx_ring->napi,
				       qlcnic_83xx_msix_tx_poll,
				       QLCNIC_NETDEV_WEIGHT);
		}
	}

	return 0;
}

void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_host_tx_ring *tx_ring;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);

	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
			tx_ring = &adapter->tx_ring[ring];
			netif_napi_del(&tx_ring->napi);
		}
	}

	qlcnic_free_tx_rings(adapter);
}

void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
				  int ring, u64 sts_data[])
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];
	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	length = qlcnic_83xx_pktln(sts_data[0]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
}

void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data[2];
	int ring, opcode;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
	opcode = qlcnic_83xx_opcode(sts_data[1]);
	if (!opcode)
		return;

	ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
	desc = &sds_ring->desc_head[consumer];
	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
	consumer = get_next_index(consumer, sds_ring->num_desc);
	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
1966