qlcnic_io.c revision 8e3fb2ce4fe73f566e3dc332fdac48739ef3114a
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include <linux/netdevice.h>
9#include <linux/if_vlan.h>
10#include <net/ip.h>
11#include <linux/ipv6.h>
12#include <net/checksum.h>
13
14#include "qlcnic.h"
15
16#define TX_ETHER_PKT	0x01
17#define TX_TCP_PKT	0x02
18#define TX_UDP_PKT	0x03
19#define TX_IP_PKT	0x04
20#define TX_TCP_LSO	0x05
21#define TX_TCP_LSO6	0x06
22#define TX_TCPV6_PKT	0x0b
23#define TX_UDPV6_PKT	0x0c
24#define FLAGS_VLAN_TAGGED	0x10
25#define FLAGS_VLAN_OOB		0x40
26
27#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
28	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
29#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
30	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
31#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
32	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
33
34#define qlcnic_set_tx_port(_desc, _port) \
35	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
36
37#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
38	((_desc)->flags_opcode |= \
39	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
40
41#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
42	((_desc)->nfrags__length = \
43	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
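
/* The setters above pack their arguments into single little-endian words.
 * For example, a frame with 3 fragments and a total length of 1514 bytes
 * yields nfrags__length =
 * cpu_to_le32((3 & 0xff) | ((1514 & 0xffffff) << 8)) = cpu_to_le32(0x0005ea03).
 */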
44
45/* owner bits of status_desc */
46#define STATUS_OWNER_HOST	(0x1ULL << 56)
47#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)
48
49/* Status descriptor:
50   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
51   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
52   53-55 desc_cnt, 56-57 owner, 58-63 opcode
53 */
54#define qlcnic_get_sts_port(sts_data)	\
55	((sts_data) & 0x0F)
56#define qlcnic_get_sts_status(sts_data)	\
57	(((sts_data) >> 4) & 0x0F)
58#define qlcnic_get_sts_type(sts_data)	\
59	(((sts_data) >> 8) & 0x0F)
60#define qlcnic_get_sts_totallength(sts_data)	\
61	(((sts_data) >> 12) & 0xFFFF)
62#define qlcnic_get_sts_refhandle(sts_data)	\
63	(((sts_data) >> 28) & 0xFFFF)
64#define qlcnic_get_sts_prot(sts_data)	\
65	(((sts_data) >> 44) & 0x0F)
66#define qlcnic_get_sts_pkt_offset(sts_data)	\
67	(((sts_data) >> 48) & 0x1F)
68#define qlcnic_get_sts_desc_cnt(sts_data)	\
69	(((sts_data) >> 53) & 0x7)
70#define qlcnic_get_sts_opcode(sts_data)	\
71	(((sts_data) >> 58) & 0x03F)
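
/* Worked example for the extractors above, using a hypothetical status word
 * sts_data = 0x1120000100064021:
 *	qlcnic_get_sts_opcode()      -> 0x04 (QLCNIC_RXPKT_DESC)
 *	owner bits (56-57)           -> 0x1  (STATUS_OWNER_HOST)
 *	qlcnic_get_sts_desc_cnt()    -> 1
 *	qlcnic_get_sts_refhandle()   -> 0x0010 (rx buffer index)
 *	qlcnic_get_sts_totallength() -> 0x0064 (100 bytes)
 *	qlcnic_get_sts_status()      -> 0x2  (STATUS_CKSUM_OK)
 *	qlcnic_get_sts_port()        -> 0x1
 */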
72
73#define qlcnic_get_lro_sts_refhandle(sts_data) 	\
74	((sts_data) & 0x07FFF)
75#define qlcnic_get_lro_sts_length(sts_data)	\
76	(((sts_data) >> 16) & 0x0FFFF)
77#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
78	(((sts_data) >> 32) & 0x0FF)
79#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
80	(((sts_data) >> 40) & 0x0FF)
81#define qlcnic_get_lro_sts_timestamp(sts_data)	\
82	(((sts_data) >> 48) & 0x1)
83#define qlcnic_get_lro_sts_type(sts_data)	\
84	(((sts_data) >> 49) & 0x7)
85#define qlcnic_get_lro_sts_push_flag(sts_data)		\
86	(((sts_data) >> 52) & 0x1)
87#define qlcnic_get_lro_sts_seq_number(sts_data)		\
88	((sts_data) & 0x0FFFFFFFF)
89#define qlcnic_get_lro_sts_mss(sts_data1)		\
90	(((sts_data1) >> 32) & 0x0FFFF)
91
92#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
93
94/* opcode field in status_desc */
95#define QLCNIC_SYN_OFFLOAD	0x03
96#define QLCNIC_RXPKT_DESC  	0x04
97#define QLCNIC_OLD_RXPKT_DESC	0x3f
98#define QLCNIC_RESPONSE_DESC	0x05
99#define QLCNIC_LRO_DESC  	0x12
100
101#define QLCNIC_TX_POLL_BUDGET		128
102#define QLCNIC_TCP_HDR_SIZE		20
103#define QLCNIC_TCP_TS_OPTION_SIZE	12
104#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
105#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)
106
107#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
108
109/* for status field in status_desc */
110#define STATUS_CKSUM_LOOP	0
111#define STATUS_CKSUM_OK		2
112
113#define qlcnic_83xx_pktln(sts)		(((sts) >> 32) & 0x3FFF)
114#define qlcnic_83xx_hndl(sts)		(((sts) >> 48) & 0x7FFF)
115#define qlcnic_83xx_csum_status(sts)	(((sts) >> 39) & 7)
116#define qlcnic_83xx_opcode(sts)	(((sts) >> 42) & 0xF)
117#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
118#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
119#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
120#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
121#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
122#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
123#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
124#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
125#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)
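
/* The 83xx status ring returns two 64-bit words per completion.  As used in
 * qlcnic_83xx_process_rcv()/_lro() below, word 0 carries the packet/LRO
 * length, the 15-bit buffer handle and the ring id (bit 63, see
 * QLCNIC_FETCH_RING_ID), while word 1 carries the opcode, checksum status,
 * header offsets and the timestamp, push and loopback flag bits decoded by
 * the macros above.
 */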
126
127struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
128				     struct qlcnic_host_rds_ring *, u16, u16);
129
130static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
131				  struct qlcnic_host_tx_ring *tx_ring)
132{
133	if (qlcnic_check_multi_tx(adapter) &&
134	    !adapter->ahw->diag_test)
135		writel(0x0, tx_ring->crb_intr_mask);
136}
137
138
139static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
140					 struct qlcnic_host_tx_ring *tx_ring)
141{
142	if (qlcnic_check_multi_tx(adapter) &&
143	    !adapter->ahw->diag_test)
144		writel(1, tx_ring->crb_intr_mask);
145}
146
147static inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
148				       struct qlcnic_host_tx_ring *tx_ring)
149{
150	writel(0, tx_ring->crb_intr_mask);
151}
152
153static inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
154					struct qlcnic_host_tx_ring *tx_ring)
155{
156	writel(1, tx_ring->crb_intr_mask);
157}
158
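/* Fold a MAC address (copied into the low 6 bytes of a zeroed u64) down to
 * one byte.  On a little-endian host this is the first byte of the address
 * XORed with the last one, e.g. 00:0e:1e:aa:bb:cc hashes to 0x00 ^ 0xcc =
 * 0xcc; callers then mask with fbucket_size - 1 to pick a hash bucket.
 */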
159static inline u8 qlcnic_mac_hash(u64 mac)
160{
161	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
162}
163
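/* On 83xx adapters the reference handle posted to the firmware is the 15-bit
 * buffer index with the RDS ring id folded into bit 15.  The value appears
 * to be echoed back in bits 48-63 of the first status word, which is why
 * qlcnic_83xx_hndl() masks with 0x7FFF and QLCNIC_FETCH_RING_ID() reads
 * bit 63 on the receive side.
 */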
164static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
165					u16 handle, u8 ring_id)
166{
167	if (qlcnic_83xx_check(adapter))
168		return handle | (ring_id << 15);
169	else
170		return handle;
171}
172
173static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
174{
175	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
176}
177
178static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
179				      struct qlcnic_filter *fil,
180				      void *addr, u16 vlan_id)
181{
182	int ret;
183	u8 op;
184
185	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
186	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
187	if (ret)
188		return;
189
190	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
191	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
192	if (!ret) {
193		hlist_del(&fil->fnode);
194		adapter->rx_fhash.fnum--;
195	}
196}
197
198static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
199						    void *addr, u16 vlan_id)
200{
201	struct qlcnic_filter *tmp_fil = NULL;
202	struct hlist_node *n;
203
204	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
205		if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
206		    tmp_fil->vlan_id == vlan_id)
207			return tmp_fil;
208	}
209
210	return NULL;
211}
212
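/* Receive-side MAC learning, used when the eswitch is enabled and driver MAC
 * learning is on.  Loopback packets are recorded (or refreshed) in the
 * rx_fhash table; for regular receive traffic a matching entry in the TX
 * fhash table is removed and its firmware filter deleted, and any rx_fhash
 * entry for the same MAC/VLAN is flushed via qlcnic_delete_rx_list_mac().
 */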
213void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
214			  int loopback_pkt, u16 vlan_id)
215{
216	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
217	struct qlcnic_filter *fil, *tmp_fil;
218	struct hlist_head *head;
219	unsigned long time;
220	u64 src_addr = 0;
221	u8 hindex, op;
222	int ret;
223
224	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
225	hindex = qlcnic_mac_hash(src_addr) &
226		 (adapter->fhash.fbucket_size - 1);
227
228	if (loopback_pkt) {
229		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
230			return;
231
232		head = &(adapter->rx_fhash.fhead[hindex]);
233
234		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
235		if (tmp_fil) {
236			time = tmp_fil->ftime;
237			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
238				tmp_fil->ftime = jiffies;
239			return;
240		}
241
242		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
243		if (!fil)
244			return;
245
246		fil->ftime = jiffies;
247		memcpy(fil->faddr, &src_addr, ETH_ALEN);
248		fil->vlan_id = vlan_id;
249		spin_lock(&adapter->rx_mac_learn_lock);
250		hlist_add_head(&(fil->fnode), head);
251		adapter->rx_fhash.fnum++;
252		spin_unlock(&adapter->rx_mac_learn_lock);
253	} else {
254		head = &adapter->fhash.fhead[hindex];
255
256		spin_lock(&adapter->mac_learn_lock);
257
258		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
259		if (tmp_fil) {
260			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
261			ret = qlcnic_sre_macaddr_change(adapter,
262							(u8 *)&src_addr,
263							vlan_id, op);
264			if (!ret) {
265				hlist_del(&tmp_fil->fnode);
266				adapter->fhash.fnum--;
267			}
268
269			spin_unlock(&adapter->mac_learn_lock);
270
271			return;
272		}
273
274		spin_unlock(&adapter->mac_learn_lock);
275
276		head = &adapter->rx_fhash.fhead[hindex];
277
278		spin_lock(&adapter->rx_mac_learn_lock);
279
280		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
281		if (tmp_fil)
282			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
283						  vlan_id);
284
285		spin_unlock(&adapter->rx_mac_learn_lock);
286	}
287}
288
289void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
290			       u16 vlan_id)
291{
292	struct cmd_desc_type0 *hwdesc;
293	struct qlcnic_nic_req *req;
294	struct qlcnic_mac_req *mac_req;
295	struct qlcnic_vlan_req *vlan_req;
296	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
297	u32 producer;
298	u64 word;
299
300	producer = tx_ring->producer;
301	hwdesc = &tx_ring->desc_head[tx_ring->producer];
302
303	req = (struct qlcnic_nic_req *)hwdesc;
304	memset(req, 0, sizeof(struct qlcnic_nic_req));
305	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
306
307	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
308	req->req_hdr = cpu_to_le64(word);
309
310	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
311	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
312	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
313
314	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
315	vlan_req->vlan_id = cpu_to_le16(vlan_id);
316
317	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
318	smp_mb();
319}
320
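/* Transmit-side MAC learning: record the source MAC of an outgoing frame in
 * the fhash table and program it into the firmware through
 * qlcnic_change_filter().  An existing entry is only re-programmed once it
 * is older than QLCNIC_READD_AGE seconds; vlan_id is always 0 in this path.
 */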
321static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
322			       struct cmd_desc_type0 *first_desc,
323			       struct sk_buff *skb)
324{
325	struct qlcnic_filter *fil, *tmp_fil;
326	struct hlist_node *n;
327	struct hlist_head *head;
328	struct net_device *netdev = adapter->netdev;
329	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
330	u64 src_addr = 0;
331	u16 vlan_id = 0;
332	u8 hindex;
333
334	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
335		return;
336
337	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
338		adapter->stats.mac_filter_limit_overrun++;
339		netdev_info(netdev, "Can not add more than %d mac addresses\n",
340			    adapter->fhash.fmax);
341		return;
342	}
343
344	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
345	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
346	head = &(adapter->fhash.fhead[hindex]);
347
348	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
349		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
350		    tmp_fil->vlan_id == vlan_id) {
351			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
352				qlcnic_change_filter(adapter, &src_addr,
353						     vlan_id);
354			tmp_fil->ftime = jiffies;
355			return;
356		}
357	}
358
359	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
360	if (!fil)
361		return;
362
363	qlcnic_change_filter(adapter, &src_addr, vlan_id);
364	fil->ftime = jiffies;
365	fil->vlan_id = vlan_id;
366	memcpy(fil->faddr, &src_addr, ETH_ALEN);
367	spin_lock(&adapter->mac_learn_lock);
368	hlist_add_head(&(fil->fnode), head);
369	adapter->fhash.fnum++;
370	spin_unlock(&adapter->mac_learn_lock);
371}
372
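/* Fill in the protocol-specific parts of the first TX descriptor: the VLAN
 * tag (inline tag, out-of-band tag or the port's tx_pvid), the opcode
 * (plain, checksum-offload or LSO variants) and the header offsets.  For LSO
 * the MAC/IP/TCP headers are also copied into the following command
 * descriptors, with the first chunk starting at byte offset 2, prefixed with
 * a constructed 802.1Q header when an out-of-band tag is in use, presumably
 * so the firmware can replicate them for every segment.
 */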
373static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
374			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
375			 struct qlcnic_host_tx_ring *tx_ring)
376{
377	u8 l4proto, opcode = 0, hdr_len = 0;
378	u16 flags = 0, vlan_tci = 0;
379	int copied, offset, copy_len, size;
380	struct cmd_desc_type0 *hwdesc;
381	struct vlan_ethhdr *vh;
382	u16 protocol = ntohs(skb->protocol);
383	u32 producer = tx_ring->producer;
384
385	if (protocol == ETH_P_8021Q) {
386		vh = (struct vlan_ethhdr *)skb->data;
387		flags = FLAGS_VLAN_TAGGED;
388		vlan_tci = ntohs(vh->h_vlan_TCI);
389		protocol = ntohs(vh->h_vlan_encapsulated_proto);
390	} else if (vlan_tx_tag_present(skb)) {
391		flags = FLAGS_VLAN_OOB;
392		vlan_tci = vlan_tx_tag_get(skb);
393	}
394	if (unlikely(adapter->tx_pvid)) {
395		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
396			return -EIO;
397		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
398			goto set_flags;
399
400		flags = FLAGS_VLAN_OOB;
401		vlan_tci = adapter->tx_pvid;
402	}
403set_flags:
404	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
405	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
406
407	if (*(skb->data) & BIT_0) {
408		flags |= BIT_0;
409		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
410	}
411	opcode = TX_ETHER_PKT;
412	if (skb_is_gso(skb)) {
413		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
414		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
415		first_desc->total_hdr_length = hdr_len;
416		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
417
418		/* For LSO, we need to copy the MAC/IP/TCP headers into
419		 * the descriptor ring */
420		copied = 0;
421		offset = 2;
422
423		if (flags & FLAGS_VLAN_OOB) {
424			first_desc->total_hdr_length += VLAN_HLEN;
425			first_desc->tcp_hdr_offset = VLAN_HLEN;
426			first_desc->ip_hdr_offset = VLAN_HLEN;
427
428			/* Only in case of TSO on vlan device */
429			flags |= FLAGS_VLAN_TAGGED;
430
431			/* Create a TSO vlan header template for firmware */
432			hwdesc = &tx_ring->desc_head[producer];
433			tx_ring->cmd_buf_arr[producer].skb = NULL;
434
435			copy_len = min((int)sizeof(struct cmd_desc_type0) -
436				       offset, hdr_len + VLAN_HLEN);
437
438			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
439			skb_copy_from_linear_data(skb, vh, 12);
440			vh->h_vlan_proto = htons(ETH_P_8021Q);
441			vh->h_vlan_TCI = htons(vlan_tci);
442
443			skb_copy_from_linear_data_offset(skb, 12,
444							 (char *)vh + 16,
445							 copy_len - 16);
446			copied = copy_len - VLAN_HLEN;
447			offset = 0;
448			producer = get_next_index(producer, tx_ring->num_desc);
449		}
450
451		while (copied < hdr_len) {
452			size = (int)sizeof(struct cmd_desc_type0) - offset;
453			copy_len = min(size, (hdr_len - copied));
454			hwdesc = &tx_ring->desc_head[producer];
455			tx_ring->cmd_buf_arr[producer].skb = NULL;
456			skb_copy_from_linear_data_offset(skb, copied,
457							 (char *)hwdesc +
458							 offset, copy_len);
459			copied += copy_len;
460			offset = 0;
461			producer = get_next_index(producer, tx_ring->num_desc);
462		}
463
464		tx_ring->producer = producer;
465		smp_mb();
466		adapter->stats.lso_frames++;
467
468	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
469		if (protocol == ETH_P_IP) {
470			l4proto = ip_hdr(skb)->protocol;
471
472			if (l4proto == IPPROTO_TCP)
473				opcode = TX_TCP_PKT;
474			else if (l4proto == IPPROTO_UDP)
475				opcode = TX_UDP_PKT;
476		} else if (protocol == ETH_P_IPV6) {
477			l4proto = ipv6_hdr(skb)->nexthdr;
478
479			if (l4proto == IPPROTO_TCP)
480				opcode = TX_TCPV6_PKT;
481			else if (l4proto == IPPROTO_UDP)
482				opcode = TX_UDPV6_PKT;
483		}
484	}
485	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
486	first_desc->ip_hdr_offset += skb_network_offset(skb);
487	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
488
489	return 0;
490}
491
492static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
493			     struct qlcnic_cmd_buffer *pbuf)
494{
495	struct qlcnic_skb_frag *nf;
496	struct skb_frag_struct *frag;
497	int i, nr_frags;
498	dma_addr_t map;
499
500	nr_frags = skb_shinfo(skb)->nr_frags;
501	nf = &pbuf->frag_array[0];
502
503	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
504			     PCI_DMA_TODEVICE);
505	if (pci_dma_mapping_error(pdev, map))
506		goto out_err;
507
508	nf->dma = map;
509	nf->length = skb_headlen(skb);
510
511	for (i = 0; i < nr_frags; i++) {
512		frag = &skb_shinfo(skb)->frags[i];
513		nf = &pbuf->frag_array[i+1];
514		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
515				       DMA_TO_DEVICE);
516		if (dma_mapping_error(&pdev->dev, map))
517			goto unwind;
518
519		nf->dma = map;
520		nf->length = skb_frag_size(frag);
521	}
522
523	return 0;
524
525unwind:
526	while (--i >= 0) {
527		nf = &pbuf->frag_array[i+1];
528		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
529	}
530
531	nf = &pbuf->frag_array[0];
532	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
533
534out_err:
535	return -ENOMEM;
536}
537
538static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
539				 struct qlcnic_cmd_buffer *pbuf)
540{
541	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
542	int i, nr_frags = skb_shinfo(skb)->nr_frags;
543
544	for (i = 0; i < nr_frags; i++) {
545		nf = &pbuf->frag_array[i+1];
546		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
547	}
548
549	nf = &pbuf->frag_array[0];
550	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
551	pbuf->skb = NULL;
552}
553
554static inline void qlcnic_clear_cmddesc(u64 *desc)
555{
556	desc[0] = 0ULL;
557	desc[2] = 0ULL;
558	desc[7] = 0ULL;
559}
560
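/* Main transmit path.  Each command descriptor carries up to four buffer
 * address/length pairs, hence the k = i % 4 walk below; an skb with, say,
 * six page fragments (frag_count = 7 including the linear area) therefore
 * consumes two descriptors plus whatever the LSO header copy in
 * qlcnic_tx_pkt() needs.  Non-TSO skbs with more than
 * QLCNIC_MAX_FRAGS_PER_TX fragments are partially linearized with
 * __pskb_pull_tail(), and the queue is stopped once the number of free
 * descriptors drops to TX_STOP_THRESH or below.
 */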
561netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
562{
563	struct qlcnic_adapter *adapter = netdev_priv(netdev);
564	struct qlcnic_host_tx_ring *tx_ring;
565	struct qlcnic_cmd_buffer *pbuf;
566	struct qlcnic_skb_frag *buffrag;
567	struct cmd_desc_type0 *hwdesc, *first_desc;
568	struct pci_dev *pdev;
569	struct ethhdr *phdr;
570	int i, k, frag_count, delta = 0;
571	u32 producer, num_txd;
572
573	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
574		netif_tx_stop_all_queues(netdev);
575		return NETDEV_TX_BUSY;
576	}
577
578	if (adapter->flags & QLCNIC_MACSPOOF) {
579		phdr = (struct ethhdr *)skb->data;
580		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
581			goto drop_packet;
582	}
583
584	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
585	num_txd = tx_ring->num_desc;
586
587	frag_count = skb_shinfo(skb)->nr_frags + 1;
588
589	/* 14 frags supported for normal packet and
590	 * 32 frags supported for TSO packet
591	 */
592	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
593		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
594			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
595
596		if (!__pskb_pull_tail(skb, delta))
597			goto drop_packet;
598
599		frag_count = 1 + skb_shinfo(skb)->nr_frags;
600	}
601
602	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
603		netif_tx_stop_queue(tx_ring->txq);
604		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
605			netif_tx_start_queue(tx_ring->txq);
606		} else {
607			tx_ring->tx_stats.xmit_off++;
608			return NETDEV_TX_BUSY;
609		}
610	}
611
612	producer = tx_ring->producer;
613	pbuf = &tx_ring->cmd_buf_arr[producer];
614	pdev = adapter->pdev;
615	first_desc = &tx_ring->desc_head[producer];
616	hwdesc = &tx_ring->desc_head[producer];
617	qlcnic_clear_cmddesc((u64 *)hwdesc);
618
619	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
620		adapter->stats.tx_dma_map_error++;
621		goto drop_packet;
622	}
623
624	pbuf->skb = skb;
625	pbuf->frag_count = frag_count;
626
627	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
628	qlcnic_set_tx_port(first_desc, adapter->portnum);
629
630	for (i = 0; i < frag_count; i++) {
631		k = i % 4;
632
633		if ((k == 0) && (i > 0)) {
634			/* move to next desc.*/
635			producer = get_next_index(producer, num_txd);
636			hwdesc = &tx_ring->desc_head[producer];
637			qlcnic_clear_cmddesc((u64 *)hwdesc);
638			tx_ring->cmd_buf_arr[producer].skb = NULL;
639		}
640
641		buffrag = &pbuf->frag_array[i];
642		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
643		switch (k) {
644		case 0:
645			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
646			break;
647		case 1:
648			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
649			break;
650		case 2:
651			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
652			break;
653		case 3:
654			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
655			break;
656		}
657	}
658
659	tx_ring->producer = get_next_index(producer, num_txd);
660	smp_mb();
661
662	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
663		goto unwind_buff;
664
665	if (adapter->drv_mac_learn)
666		qlcnic_send_filter(adapter, first_desc, skb);
667
668	tx_ring->tx_stats.tx_bytes += skb->len;
669	tx_ring->tx_stats.xmit_called++;
670
671	qlcnic_update_cmd_producer(tx_ring);
672
673	return NETDEV_TX_OK;
674
675unwind_buff:
676	qlcnic_unmap_buffers(pdev, skb, pbuf);
677drop_packet:
678	adapter->stats.txdropped++;
679	dev_kfree_skb_any(skb);
680	return NETDEV_TX_OK;
681}
682
683void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
684{
685	struct net_device *netdev = adapter->netdev;
686
687	if (adapter->ahw->linkup && !linkup) {
688		netdev_info(netdev, "NIC Link is down\n");
689		adapter->ahw->linkup = 0;
690		netif_carrier_off(netdev);
691	} else if (!adapter->ahw->linkup && linkup) {
692		netdev_info(netdev, "NIC Link is up\n");
693		adapter->ahw->linkup = 1;
694		netif_carrier_on(netdev);
695	}
696}
697
698static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
699			       struct qlcnic_host_rds_ring *rds_ring,
700			       struct qlcnic_rx_buffer *buffer)
701{
702	struct sk_buff *skb;
703	dma_addr_t dma;
704	struct pci_dev *pdev = adapter->pdev;
705
706	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
707	if (!skb) {
708		adapter->stats.skb_alloc_failure++;
709		return -ENOMEM;
710	}
711
712	skb_reserve(skb, NET_IP_ALIGN);
713	dma = pci_map_single(pdev, skb->data,
714			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);
715
716	if (pci_dma_mapping_error(pdev, dma)) {
717		adapter->stats.rx_dma_map_error++;
718		dev_kfree_skb_any(skb);
719		return -ENOMEM;
720	}
721
722	buffer->skb = skb;
723	buffer->dma = dma;
724
725	return 0;
726}
727
728static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
729					struct qlcnic_host_rds_ring *rds_ring,
730					u8 ring_id)
731{
732	struct rcv_desc *pdesc;
733	struct qlcnic_rx_buffer *buffer;
734	int  count = 0;
735	uint32_t producer, handle;
736	struct list_head *head;
737
738	if (!spin_trylock(&rds_ring->lock))
739		return;
740
741	producer = rds_ring->producer;
742	head = &rds_ring->free_list;
743	while (!list_empty(head)) {
744		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
745
746		if (!buffer->skb) {
747			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
748				break;
749		}
750		count++;
751		list_del(&buffer->list);
752
753		/* make a rcv descriptor  */
754		pdesc = &rds_ring->desc_head[producer];
755		handle = qlcnic_get_ref_handle(adapter,
756					       buffer->ref_handle, ring_id);
757		pdesc->reference_handle = cpu_to_le16(handle);
758		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
759		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
760		producer = get_next_index(producer, rds_ring->num_desc);
761	}
762	if (count) {
763		rds_ring->producer = producer;
764		writel((producer - 1) & (rds_ring->num_desc - 1),
765		       rds_ring->crb_rcv_producer);
766	}
767	spin_unlock(&rds_ring->lock);
768}
769
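/* Reclaim completed TX buffers: walk from sw_consumer towards the
 * firmware-updated hw_consumer (bounded by the NAPI budget), unmap the DMA
 * buffers, free the skbs and wake the queue once enough descriptors are
 * available again.  Returns non-zero when the ring is fully drained, which
 * the poll routines use as their tx_complete indication; if the
 * tx_clean_lock is contended the function bails out early and also reports
 * "done".
 */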
770static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
771				   struct qlcnic_host_tx_ring *tx_ring,
772				   int budget)
773{
774	u32 sw_consumer, hw_consumer;
775	int i, done, count = 0;
776	struct qlcnic_cmd_buffer *buffer;
777	struct pci_dev *pdev = adapter->pdev;
778	struct net_device *netdev = adapter->netdev;
779	struct qlcnic_skb_frag *frag;
780
781	if (!spin_trylock(&adapter->tx_clean_lock))
782		return 1;
783
784	sw_consumer = tx_ring->sw_consumer;
785	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
786
787	while (sw_consumer != hw_consumer) {
788		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
789		if (buffer->skb) {
790			frag = &buffer->frag_array[0];
791			pci_unmap_single(pdev, frag->dma, frag->length,
792					 PCI_DMA_TODEVICE);
793			frag->dma = 0ULL;
794			for (i = 1; i < buffer->frag_count; i++) {
795				frag++;
796				pci_unmap_page(pdev, frag->dma, frag->length,
797					       PCI_DMA_TODEVICE);
798				frag->dma = 0ULL;
799			}
800			tx_ring->tx_stats.xmit_finished++;
801			dev_kfree_skb_any(buffer->skb);
802			buffer->skb = NULL;
803		}
804
805		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
806		if (++count >= budget)
807			break;
808	}
809
810	if (count && netif_running(netdev)) {
811		tx_ring->sw_consumer = sw_consumer;
812		smp_mb();
813		if (netif_tx_queue_stopped(tx_ring->txq) &&
814		    netif_carrier_ok(netdev)) {
815			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
816				netif_tx_wake_queue(tx_ring->txq);
817				tx_ring->tx_stats.xmit_on++;
818			}
819		}
820		adapter->tx_timeo_cnt = 0;
821	}
822	/*
823	 * If everything is freed up to consumer then check if the ring is full
824	 * If the ring is full then check if more needs to be freed and
825	 * schedule the call back again.
826	 *
827	 * This happens when there are 2 CPUs. One could be freeing and the
828	 * other filling it. If the ring is full when we get out of here and
829	 * the card has already interrupted the host then the host can miss the
830	 * interrupt.
831	 *
832	 * There is still a possible race condition and the host could miss an
833	 * interrupt. The card has to take care of this.
834	 */
835	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
836	done = (sw_consumer == hw_consumer);
837	spin_unlock(&adapter->tx_clean_lock);
838
839	return done;
840}
841
842static int qlcnic_poll(struct napi_struct *napi, int budget)
843{
844	int tx_complete, work_done;
845	struct qlcnic_host_sds_ring *sds_ring;
846	struct qlcnic_adapter *adapter;
847	struct qlcnic_host_tx_ring *tx_ring;
848
849	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
850	adapter = sds_ring->adapter;
851	tx_ring = sds_ring->tx_ring;
852
853	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
854					      budget);
855	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
856	if ((work_done < budget) && tx_complete) {
857		napi_complete(&sds_ring->napi);
858		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
859			qlcnic_enable_int(sds_ring);
860			qlcnic_enable_tx_intr(adapter, tx_ring);
861		}
862	}
863
864	return work_done;
865}
866
867static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
868{
869	struct qlcnic_host_tx_ring *tx_ring;
870	struct qlcnic_adapter *adapter;
871	int work_done;
872
873	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
874	adapter = tx_ring->adapter;
875
876	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
877	if (work_done) {
878		napi_complete(&tx_ring->napi);
879		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
880			qlcnic_enable_tx_intr(adapter, tx_ring);
881	}
882
883	return work_done;
884}
885
886static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
887{
888	struct qlcnic_host_sds_ring *sds_ring;
889	struct qlcnic_adapter *adapter;
890	int work_done;
891
892	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
893	adapter = sds_ring->adapter;
894
895	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
896
897	if (work_done < budget) {
898		napi_complete(&sds_ring->napi);
899		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
900			qlcnic_enable_int(sds_ring);
901	}
902
903	return work_done;
904}
905
906static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
907				    struct qlcnic_fw_msg *msg)
908{
909	u32 cable_OUI;
910	u16 cable_len, link_speed;
911	u8  link_status, module, duplex, autoneg, lb_status = 0;
912	struct net_device *netdev = adapter->netdev;
913
914	adapter->ahw->has_link_events = 1;
915
916	cable_OUI = msg->body[1] & 0xffffffff;
917	cable_len = (msg->body[1] >> 32) & 0xffff;
918	link_speed = (msg->body[1] >> 48) & 0xffff;
919
920	link_status = msg->body[2] & 0xff;
921	duplex = (msg->body[2] >> 16) & 0xff;
922	autoneg = (msg->body[2] >> 24) & 0xff;
923	lb_status = (msg->body[2] >> 32) & 0x3;
924
925	module = (msg->body[2] >> 8) & 0xff;
926	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
927		dev_info(&netdev->dev,
928			 "unsupported cable: OUI 0x%x, length %d\n",
929			 cable_OUI, cable_len);
930	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
931		dev_info(&netdev->dev, "unsupported cable length %d\n",
932			 cable_len);
933
934	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
935	    lb_status == QLCNIC_ELB_MODE))
936		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
937
938	qlcnic_advert_link_change(adapter, link_status);
939
940	if (duplex == LINKEVENT_FULL_DUPLEX)
941		adapter->ahw->link_duplex = DUPLEX_FULL;
942	else
943		adapter->ahw->link_duplex = DUPLEX_HALF;
944
945	adapter->ahw->module_type = module;
946	adapter->ahw->link_autoneg = autoneg;
947
948	if (link_status) {
949		adapter->ahw->link_speed = link_speed;
950	} else {
951		adapter->ahw->link_speed = SPEED_UNKNOWN;
952		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
953	}
954}
955
956static void qlcnic_handle_fw_message(int desc_cnt, int index,
957				     struct qlcnic_host_sds_ring *sds_ring)
958{
959	struct qlcnic_fw_msg msg;
960	struct status_desc *desc;
961	struct qlcnic_adapter *adapter;
962	struct device *dev;
963	int i = 0, opcode, ret;
964
965	while (desc_cnt > 0 && i < 8) {
966		desc = &sds_ring->desc_head[index];
967		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
968		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
969
970		index = get_next_index(index, sds_ring->num_desc);
971		desc_cnt--;
972	}
973
974	adapter = sds_ring->adapter;
975	dev = &adapter->pdev->dev;
976	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
977
978	switch (opcode) {
979	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
980		qlcnic_handle_linkevent(adapter, &msg);
981		break;
982	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
983		ret = (u32)(msg.body[1]);
984		switch (ret) {
985		case 0:
986			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
987			break;
988		case 1:
989			dev_info(dev, "loopback already in progress\n");
990			adapter->ahw->diag_cnt = -EINPROGRESS;
991			break;
992		case 2:
993			dev_info(dev, "loopback cable is not connected\n");
994			adapter->ahw->diag_cnt = -ENODEV;
995			break;
996		default:
997			dev_info(dev,
998				 "loopback configure request failed, err %x\n",
999				 ret);
1000			adapter->ahw->diag_cnt = -EIO;
1001			break;
1002		}
1003		break;
1004	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
1005		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
1006		break;
1007	default:
1008		break;
1009	}
1010}
1011
1012struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1013				     struct qlcnic_host_rds_ring *ring,
1014				     u16 index, u16 cksum)
1015{
1016	struct qlcnic_rx_buffer *buffer;
1017	struct sk_buff *skb;
1018
1019	buffer = &ring->rx_buf_arr[index];
1020	if (unlikely(buffer->skb == NULL)) {
1021		WARN_ON(1);
1022		return NULL;
1023	}
1024
1025	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
1026			 PCI_DMA_FROMDEVICE);
1027
1028	skb = buffer->skb;
1029	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
1030		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
1031		adapter->stats.csummed++;
1032		skb->ip_summed = CHECKSUM_UNNECESSARY;
1033	} else {
1034		skb_checksum_none_assert(skb);
1035	}
1036
1037
1038	buffer->skb = NULL;
1039
1040	return skb;
1041}
1042
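/* Strip an 802.1Q header from the received frame, if present, and report the
 * tag through *vlan_tag.  With an rx_pvid configured, a tag matching the
 * pvid is treated as the port's outer tag and reported as 0xffff so the
 * frame continues up the stack untagged; otherwise the frame is only
 * accepted if tagging is enabled (the callers drop it on -EINVAL).
 */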
1043static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
1044					  struct sk_buff *skb, u16 *vlan_tag)
1045{
1046	struct ethhdr *eth_hdr;
1047
1048	if (!__vlan_get_tag(skb, vlan_tag)) {
1049		eth_hdr = (struct ethhdr *)skb->data;
1050		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1051		skb_pull(skb, VLAN_HLEN);
1052	}
1053	if (!adapter->rx_pvid)
1054		return 0;
1055
1056	if (*vlan_tag == adapter->rx_pvid) {
1057		/* Outer vlan tag. Packet should follow non-vlan path */
1058		*vlan_tag = 0xffff;
1059		return 0;
1060	}
1061	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1062		return 0;
1063
1064	return -EINVAL;
1065}
1066
1067static struct qlcnic_rx_buffer *
1068qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1069		   struct qlcnic_host_sds_ring *sds_ring, int ring,
1070		   u64 sts_data0)
1071{
1072	struct net_device *netdev = adapter->netdev;
1073	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1074	struct qlcnic_rx_buffer *buffer;
1075	struct sk_buff *skb;
1076	struct qlcnic_host_rds_ring *rds_ring;
1077	int index, length, cksum, pkt_offset, is_lb_pkt;
1078	u16 vid = 0xffff, t_vid;
1079
1080	if (unlikely(ring >= adapter->max_rds_rings))
1081		return NULL;
1082
1083	rds_ring = &recv_ctx->rds_rings[ring];
1084
1085	index = qlcnic_get_sts_refhandle(sts_data0);
1086	if (unlikely(index >= rds_ring->num_desc))
1087		return NULL;
1088
1089	buffer = &rds_ring->rx_buf_arr[index];
1090	length = qlcnic_get_sts_totallength(sts_data0);
1091	cksum  = qlcnic_get_sts_status(sts_data0);
1092	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1093
1094	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1095	if (!skb)
1096		return buffer;
1097
1098	if (adapter->drv_mac_learn &&
1099	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1100		t_vid = 0;
1101		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1102		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1103	}
1104
1105	if (length > rds_ring->skb_size)
1106		skb_put(skb, rds_ring->skb_size);
1107	else
1108		skb_put(skb, length);
1109
1110	if (pkt_offset)
1111		skb_pull(skb, pkt_offset);
1112
1113	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1114		adapter->stats.rxdropped++;
1115		dev_kfree_skb(skb);
1116		return buffer;
1117	}
1118
1119	skb->protocol = eth_type_trans(skb, netdev);
1120
1121	if (vid != 0xffff)
1122		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1123
1124	napi_gro_receive(&sds_ring->napi, skb);
1125
1126	adapter->stats.rx_pkts++;
1127	adapter->stats.rxbytes += length;
1128
1129	return buffer;
1130}
1131
1132#define QLC_TCP_HDR_SIZE            20
1133#define QLC_TCP_TS_OPTION_SIZE      12
1134#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1135
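/* Handle an LRO completion: the firmware has aggregated several TCP segments
 * into one buffer, with the payload starting at l4_hdr_offset plus a 20 byte
 * TCP header (32 bytes when the timestamp option is present).  The IP total
 * length, TCP sequence number and PSH flag are rewritten here so the merged
 * frame looks like a single segment to the stack, and gso_size is filled
 * from the firmware-reported MSS when QLCNIC_FW_LRO_MSS_CAP is set.
 */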
1136static struct qlcnic_rx_buffer *
1137qlcnic_process_lro(struct qlcnic_adapter *adapter,
1138		   int ring, u64 sts_data0, u64 sts_data1)
1139{
1140	struct net_device *netdev = adapter->netdev;
1141	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1142	struct qlcnic_rx_buffer *buffer;
1143	struct sk_buff *skb;
1144	struct qlcnic_host_rds_ring *rds_ring;
1145	struct iphdr *iph;
1146	struct ipv6hdr *ipv6h;
1147	struct tcphdr *th;
1148	bool push, timestamp;
1149	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
1150	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
1151	u32 seq_number;
1152
1153	if (unlikely(ring >= adapter->max_rds_rings))
1154		return NULL;
1155
1156	rds_ring = &recv_ctx->rds_rings[ring];
1157
1158	index = qlcnic_get_lro_sts_refhandle(sts_data0);
1159	if (unlikely(index >= rds_ring->num_desc))
1160		return NULL;
1161
1162	buffer = &rds_ring->rx_buf_arr[index];
1163
1164	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1165	lro_length = qlcnic_get_lro_sts_length(sts_data0);
1166	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1167	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1168	push = qlcnic_get_lro_sts_push_flag(sts_data0);
1169	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1170
1171	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1172	if (!skb)
1173		return buffer;
1174
1175	if (adapter->drv_mac_learn &&
1176	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1177		t_vid = 0;
1178		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1179		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1180	}
1181
1182	if (timestamp)
1183		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1184	else
1185		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1186
1187	skb_put(skb, lro_length + data_offset);
1188	skb_pull(skb, l2_hdr_offset);
1189
1190	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1191		adapter->stats.rxdropped++;
1192		dev_kfree_skb(skb);
1193		return buffer;
1194	}
1195
1196	skb->protocol = eth_type_trans(skb, netdev);
1197
1198	if (ntohs(skb->protocol) == ETH_P_IPV6) {
1199		ipv6h = (struct ipv6hdr *)skb->data;
1200		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
1201		length = (th->doff << 2) + lro_length;
1202		ipv6h->payload_len = htons(length);
1203	} else {
1204		iph = (struct iphdr *)skb->data;
1205		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1206		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1207		csum_replace2(&iph->check, iph->tot_len, htons(length));
1208		iph->tot_len = htons(length);
1209	}
1210
1211	th->psh = push;
1212	th->seq = htonl(seq_number);
1213	length = skb->len;
1214
1215	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
1216		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
1217		if (skb->protocol == htons(ETH_P_IPV6))
1218			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1219		else
1220			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1221	}
1222
1223	if (vid != 0xffff)
1224		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1225	netif_receive_skb(skb);
1226
1227	adapter->stats.lro_pkts++;
1228	adapter->stats.lrobytes += length;
1229
1230	return buffer;
1231}
1232
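/* 82xx receive completion loop.  Status descriptors are consumed for as long
 * as they are marked STATUS_OWNER_HOST and the budget allows; each one is
 * dispatched on its opcode (plain receive, LRO or firmware response), handed
 * back to the firmware via QLCNIC_DESC_OWNER_FW, and the drained rx buffers
 * are re-allocated and re-posted to the RDS rings before the consumer index
 * is written back.
 */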
1233int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1234{
1235	struct qlcnic_host_rds_ring *rds_ring;
1236	struct qlcnic_adapter *adapter = sds_ring->adapter;
1237	struct list_head *cur;
1238	struct status_desc *desc;
1239	struct qlcnic_rx_buffer *rxbuf;
1240	int opcode, desc_cnt, count = 0;
1241	u64 sts_data0, sts_data1;
1242	u8 ring;
1243	u32 consumer = sds_ring->consumer;
1244
1245	while (count < max) {
1246		desc = &sds_ring->desc_head[consumer];
1247		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1248
1249		if (!(sts_data0 & STATUS_OWNER_HOST))
1250			break;
1251
1252		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1253		opcode = qlcnic_get_sts_opcode(sts_data0);
1254		switch (opcode) {
1255		case QLCNIC_RXPKT_DESC:
1256		case QLCNIC_OLD_RXPKT_DESC:
1257		case QLCNIC_SYN_OFFLOAD:
1258			ring = qlcnic_get_sts_type(sts_data0);
1259			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
1260						   sts_data0);
1261			break;
1262		case QLCNIC_LRO_DESC:
1263			ring = qlcnic_get_lro_sts_type(sts_data0);
1264			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1265			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
1266						   sts_data1);
1267			break;
1268		case QLCNIC_RESPONSE_DESC:
1269			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1270		default:
1271			goto skip;
1272		}
1273		WARN_ON(desc_cnt > 1);
1274
1275		if (likely(rxbuf))
1276			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1277		else
1278			adapter->stats.null_rxbuf++;
1279skip:
1280		for (; desc_cnt > 0; desc_cnt--) {
1281			desc = &sds_ring->desc_head[consumer];
1282			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
1283			consumer = get_next_index(consumer, sds_ring->num_desc);
1284		}
1285		count++;
1286	}
1287
1288	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1289		rds_ring = &adapter->recv_ctx->rds_rings[ring];
1290		if (!list_empty(&sds_ring->free_list[ring])) {
1291			list_for_each(cur, &sds_ring->free_list[ring]) {
1292				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
1293						   list);
1294				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1295			}
1296			spin_lock(&rds_ring->lock);
1297			list_splice_tail_init(&sds_ring->free_list[ring],
1298					      &rds_ring->free_list);
1299			spin_unlock(&rds_ring->lock);
1300		}
1301
1302		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
1303	}
1304
1305	if (count) {
1306		sds_ring->consumer = consumer;
1307		writel(consumer, sds_ring->crb_sts_consumer);
1308	}
1309
1310	return count;
1311}
1312
1313void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1314			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
1315{
1316	struct rcv_desc *pdesc;
1317	struct qlcnic_rx_buffer *buffer;
1318	int count = 0;
1319	u32 producer, handle;
1320	struct list_head *head;
1321
1322	producer = rds_ring->producer;
1323	head = &rds_ring->free_list;
1324
1325	while (!list_empty(head)) {
1326
1327		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1328
1329		if (!buffer->skb) {
1330			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1331				break;
1332		}
1333
1334		count++;
1335		list_del(&buffer->list);
1336
1337		/* make a rcv descriptor  */
1338		pdesc = &rds_ring->desc_head[producer];
1339		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1340		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
1341					       ring_id);
1342		pdesc->reference_handle = cpu_to_le16(handle);
1343		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1344		producer = get_next_index(producer, rds_ring->num_desc);
1345	}
1346
1347	if (count) {
1348		rds_ring->producer = producer;
1349		writel((producer-1) & (rds_ring->num_desc-1),
1350		       rds_ring->crb_rcv_producer);
1351	}
1352}
1353
1354static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1355{
1356	int i;
1357	unsigned char *data = skb->data;
1358
1359	pr_info("\n");
1360	for (i = 0; i < skb->len; i++) {
1361		QLCDB(adapter, DRV, "%02x ", data[i]);
1362		if ((i & 0x0f) == 8)
1363			pr_info("\n");
1364	}
1365}
1366
1367static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
1368				    u64 sts_data0)
1369{
1370	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1371	struct sk_buff *skb;
1372	struct qlcnic_host_rds_ring *rds_ring;
1373	int index, length, cksum, pkt_offset;
1374
1375	if (unlikely(ring >= adapter->max_rds_rings))
1376		return;
1377
1378	rds_ring = &recv_ctx->rds_rings[ring];
1379
1380	index = qlcnic_get_sts_refhandle(sts_data0);
1381	length = qlcnic_get_sts_totallength(sts_data0);
1382	if (unlikely(index >= rds_ring->num_desc))
1383		return;
1384
1385	cksum  = qlcnic_get_sts_status(sts_data0);
1386	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1387
1388	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1389	if (!skb)
1390		return;
1391
1392	if (length > rds_ring->skb_size)
1393		skb_put(skb, rds_ring->skb_size);
1394	else
1395		skb_put(skb, length);
1396
1397	if (pkt_offset)
1398		skb_pull(skb, pkt_offset);
1399
1400	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1401		adapter->ahw->diag_cnt++;
1402	else
1403		dump_skb(skb, adapter);
1404
1405	dev_kfree_skb_any(skb);
1406	adapter->stats.rx_pkts++;
1407	adapter->stats.rxbytes += length;
1408
1409	return;
1410}
1411
1412void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1413{
1414	struct qlcnic_adapter *adapter = sds_ring->adapter;
1415	struct status_desc *desc;
1416	u64 sts_data0;
1417	int ring, opcode, desc_cnt;
1418
1419	u32 consumer = sds_ring->consumer;
1420
1421	desc = &sds_ring->desc_head[consumer];
1422	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1423
1424	if (!(sts_data0 & STATUS_OWNER_HOST))
1425		return;
1426
1427	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1428	opcode = qlcnic_get_sts_opcode(sts_data0);
1429	switch (opcode) {
1430	case QLCNIC_RESPONSE_DESC:
1431		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1432		break;
1433	default:
1434		ring = qlcnic_get_sts_type(sts_data0);
1435		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
1436		break;
1437	}
1438
1439	for (; desc_cnt > 0; desc_cnt--) {
1440		desc = &sds_ring->desc_head[consumer];
1441		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1442		consumer = get_next_index(consumer, sds_ring->num_desc);
1443	}
1444
1445	sds_ring->consumer = consumer;
1446	writel(consumer, sds_ring->crb_sts_consumer);
1447}
1448
1449int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
1450			 struct net_device *netdev)
1451{
1452	int ring;
1453	struct qlcnic_host_sds_ring *sds_ring;
1454	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1455	struct qlcnic_host_tx_ring *tx_ring;
1456
1457	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
1458		return -ENOMEM;
1459
1460	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1461		sds_ring = &recv_ctx->sds_rings[ring];
1462		if (qlcnic_check_multi_tx(adapter) &&
1463		    !adapter->ahw->diag_test) {
1464			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
1465				       NAPI_POLL_WEIGHT);
1466		} else {
1467			if (ring == (adapter->drv_sds_rings - 1))
1468				netif_napi_add(netdev, &sds_ring->napi,
1469					       qlcnic_poll,
1470					       NAPI_POLL_WEIGHT);
1471			else
1472				netif_napi_add(netdev, &sds_ring->napi,
1473					       qlcnic_rx_poll,
1474					       NAPI_POLL_WEIGHT);
1475		}
1476	}
1477
1478	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
1479		qlcnic_free_sds_rings(recv_ctx);
1480		return -ENOMEM;
1481	}
1482
1483	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1484		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1485			tx_ring = &adapter->tx_ring[ring];
1486			netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
1487				       NAPI_POLL_WEIGHT);
1488		}
1489	}
1490
1491	return 0;
1492}
1493
1494void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
1495{
1496	int ring;
1497	struct qlcnic_host_sds_ring *sds_ring;
1498	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1499	struct qlcnic_host_tx_ring *tx_ring;
1500
1501	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1502		sds_ring = &recv_ctx->sds_rings[ring];
1503		netif_napi_del(&sds_ring->napi);
1504	}
1505
1506	qlcnic_free_sds_rings(adapter->recv_ctx);
1507
1508	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
1509		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1510			tx_ring = &adapter->tx_ring[ring];
1511			netif_napi_del(&tx_ring->napi);
1512		}
1513	}
1514
1515	qlcnic_free_tx_rings(adapter);
1516}
1517
1518void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
1519{
1520	int ring;
1521	struct qlcnic_host_sds_ring *sds_ring;
1522	struct qlcnic_host_tx_ring *tx_ring;
1523	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1524
1525	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1526		return;
1527
1528	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1529		sds_ring = &recv_ctx->sds_rings[ring];
1530		napi_enable(&sds_ring->napi);
1531		qlcnic_enable_int(sds_ring);
1532	}
1533
1534	if (qlcnic_check_multi_tx(adapter) &&
1535	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
1536	    !adapter->ahw->diag_test) {
1537		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1538			tx_ring = &adapter->tx_ring[ring];
1539			napi_enable(&tx_ring->napi);
1540			qlcnic_enable_tx_intr(adapter, tx_ring);
1541		}
1542	}
1543}
1544
1545void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
1546{
1547	int ring;
1548	struct qlcnic_host_sds_ring *sds_ring;
1549	struct qlcnic_host_tx_ring *tx_ring;
1550	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1551
1552	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1553		return;
1554
1555	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1556		sds_ring = &recv_ctx->sds_rings[ring];
1557		qlcnic_disable_int(sds_ring);
1558		napi_synchronize(&sds_ring->napi);
1559		napi_disable(&sds_ring->napi);
1560	}
1561
1562	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1563	    !adapter->ahw->diag_test &&
1564	    qlcnic_check_multi_tx(adapter)) {
1565		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1566			tx_ring = &adapter->tx_ring[ring];
1567			qlcnic_disable_tx_int(adapter, tx_ring);
1568			napi_synchronize(&tx_ring->napi);
1569			napi_disable(&tx_ring->napi);
1570		}
1571	}
1572}
1573
1574#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
1575#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)
1576
1577static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
1578{
1579	if (lro_pkt)
1580		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
1581	else
1582		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
1583}
1584
1585static struct qlcnic_rx_buffer *
1586qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1587			struct qlcnic_host_sds_ring *sds_ring,
1588			u8 ring, u64 sts_data[])
1589{
1590	struct net_device *netdev = adapter->netdev;
1591	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1592	struct qlcnic_rx_buffer *buffer;
1593	struct sk_buff *skb;
1594	struct qlcnic_host_rds_ring *rds_ring;
1595	int index, length, cksum, is_lb_pkt;
1596	u16 vid = 0xffff, t_vid;
1597
1598	if (unlikely(ring >= adapter->max_rds_rings))
1599		return NULL;
1600
1601	rds_ring = &recv_ctx->rds_rings[ring];
1602
1603	index = qlcnic_83xx_hndl(sts_data[0]);
1604	if (unlikely(index >= rds_ring->num_desc))
1605		return NULL;
1606
1607	buffer = &rds_ring->rx_buf_arr[index];
1608	length = qlcnic_83xx_pktln(sts_data[0]);
1609	cksum  = qlcnic_83xx_csum_status(sts_data[1]);
1610	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1611	if (!skb)
1612		return buffer;
1613
1614	if (adapter->drv_mac_learn &&
1615	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1616		t_vid = 0;
1617		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
1618		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1619	}
1620
1621	if (length > rds_ring->skb_size)
1622		skb_put(skb, rds_ring->skb_size);
1623	else
1624		skb_put(skb, length);
1625
1626	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1627		adapter->stats.rxdropped++;
1628		dev_kfree_skb(skb);
1629		return buffer;
1630	}
1631
1632	skb->protocol = eth_type_trans(skb, netdev);
1633
1634	if (vid != 0xffff)
1635		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1636
1637	napi_gro_receive(&sds_ring->napi, skb);
1638
1639	adapter->stats.rx_pkts++;
1640	adapter->stats.rxbytes += length;
1641
1642	return buffer;
1643}
1644
1645static struct qlcnic_rx_buffer *
1646qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
1647			u8 ring, u64 sts_data[])
1648{
1649	struct net_device *netdev = adapter->netdev;
1650	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1651	struct qlcnic_rx_buffer *buffer;
1652	struct sk_buff *skb;
1653	struct qlcnic_host_rds_ring *rds_ring;
1654	struct iphdr *iph;
1655	struct ipv6hdr *ipv6h;
1656	struct tcphdr *th;
1657	bool push;
1658	int l2_hdr_offset, l4_hdr_offset;
1659	int index, is_lb_pkt;
1660	u16 lro_length, length, data_offset, gso_size;
1661	u16 vid = 0xffff, t_vid;
1662
1663	if (unlikely(ring >= adapter->max_rds_rings))
1664		return NULL;
1665
1666	rds_ring = &recv_ctx->rds_rings[ring];
1667
1668	index = qlcnic_83xx_hndl(sts_data[0]);
1669	if (unlikely(index >= rds_ring->num_desc))
1670		return NULL;
1671
1672	buffer = &rds_ring->rx_buf_arr[index];
1673
1674	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
1675	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
1676	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
1677	push = qlcnic_83xx_is_psh_bit(sts_data[1]);
1678
1679	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1680	if (!skb)
1681		return buffer;
1682
1683	if (adapter->drv_mac_learn &&
1684	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1685		t_vid = 0;
1686		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
1687		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1688	}
1689	if (qlcnic_83xx_is_tstamp(sts_data[1]))
1690		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
1691	else
1692		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
1693
1694	skb_put(skb, lro_length + data_offset);
1695	skb_pull(skb, l2_hdr_offset);
1696
1697	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1698		adapter->stats.rxdropped++;
1699		dev_kfree_skb(skb);
1700		return buffer;
1701	}
1702
1703	skb->protocol = eth_type_trans(skb, netdev);
1704	if (ntohs(skb->protocol) == ETH_P_IPV6) {
1705		ipv6h = (struct ipv6hdr *)skb->data;
1706		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
1707
1708		length = (th->doff << 2) + lro_length;
1709		ipv6h->payload_len = htons(length);
1710	} else {
1711		iph = (struct iphdr *)skb->data;
1712		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1713		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1714		csum_replace2(&iph->check, iph->tot_len, htons(length));
1715		iph->tot_len = htons(length);
1716	}
1717
1718	th->psh = push;
1719	length = skb->len;
1720
1721	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
1722		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
1723		skb_shinfo(skb)->gso_size = gso_size;
1724		if (skb->protocol == htons(ETH_P_IPV6))
1725			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1726		else
1727			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1728	}
1729
1730	if (vid != 0xffff)
1731		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1732
1733	netif_receive_skb(skb);
1734
1735	adapter->stats.lro_pkts++;
1736	adapter->stats.lrobytes += length;
1737	return buffer;
1738}
1739
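/* 83xx receive completion loop.  Unlike the 82xx path there is no owner bit:
 * a status descriptor whose second word decodes to a zero opcode has not
 * been written by the firmware yet, and processed descriptors are recycled
 * simply by clearing status_desc_data[1].
 */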
1740static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
1741					int max)
1742{
1743	struct qlcnic_host_rds_ring *rds_ring;
1744	struct qlcnic_adapter *adapter = sds_ring->adapter;
1745	struct list_head *cur;
1746	struct status_desc *desc;
1747	struct qlcnic_rx_buffer *rxbuf = NULL;
1748	u8 ring;
1749	u64 sts_data[2];
1750	int count = 0, opcode;
1751	u32 consumer = sds_ring->consumer;
1752
1753	while (count < max) {
1754		desc = &sds_ring->desc_head[consumer];
1755		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
1756		opcode = qlcnic_83xx_opcode(sts_data[1]);
1757		if (!opcode)
1758			break;
1759		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
1760		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
1761
1762		switch (opcode) {
1763		case QLC_83XX_REG_DESC:
1764			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
1765							ring, sts_data);
1766			break;
1767		case QLC_83XX_LRO_DESC:
1768			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
1769							sts_data);
1770			break;
1771		default:
1772			dev_info(&adapter->pdev->dev,
1773				 "Unknown opcode: 0x%x\n", opcode);
1774			goto skip;
1775		}
1776
1777		if (likely(rxbuf))
1778			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1779		else
1780			adapter->stats.null_rxbuf++;
1781skip:
1782		desc = &sds_ring->desc_head[consumer];
1783		/* Reset the descriptor */
1784		desc->status_desc_data[1] = 0;
1785		consumer = get_next_index(consumer, sds_ring->num_desc);
1786		count++;
1787	}
1788	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1789		rds_ring = &adapter->recv_ctx->rds_rings[ring];
1790		if (!list_empty(&sds_ring->free_list[ring])) {
1791			list_for_each(cur, &sds_ring->free_list[ring]) {
1792				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
1793						   list);
1794				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1795			}
1796			spin_lock(&rds_ring->lock);
1797			list_splice_tail_init(&sds_ring->free_list[ring],
1798					      &rds_ring->free_list);
1799			spin_unlock(&rds_ring->lock);
1800		}
1801		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
1802	}
1803	if (count) {
1804		sds_ring->consumer = consumer;
1805		writel(consumer, sds_ring->crb_sts_consumer);
1806	}
1807	return count;
1808}
1809
1810static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
1811{
1812	int tx_complete;
1813	int work_done;
1814	struct qlcnic_host_sds_ring *sds_ring;
1815	struct qlcnic_adapter *adapter;
1816	struct qlcnic_host_tx_ring *tx_ring;
1817
1818	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1819	adapter = sds_ring->adapter;
1820	/* tx ring count = 1 */
1821	tx_ring = adapter->tx_ring;
1822
1823	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1824	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1825	if ((work_done < budget) && tx_complete) {
1826		napi_complete(&sds_ring->napi);
1827		qlcnic_83xx_enable_intr(adapter, sds_ring);
1828	}
1829
1830	return work_done;
1831}
1832
1833static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1834{
1835	int tx_complete;
1836	int work_done;
1837	struct qlcnic_host_sds_ring *sds_ring;
1838	struct qlcnic_adapter *adapter;
1839	struct qlcnic_host_tx_ring *tx_ring;
1840
1841	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1842	adapter = sds_ring->adapter;
1843	/* tx ring count = 1 */
1844	tx_ring = adapter->tx_ring;
1845
1846	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1847	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1848	if ((work_done < budget) && tx_complete) {
1849		napi_complete(&sds_ring->napi);
1850		qlcnic_83xx_enable_intr(adapter, sds_ring);
1851	}
1852
1853	return work_done;
1854}
1855
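/*
 * Per-TX-ring NAPI handler used when each TX ring has its own MSI-X
 * vector.  The caller-supplied budget is replaced with the fixed
 * QLCNIC_TX_POLL_BUDGET before processing the command (TX) ring.
 */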
1856static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
1857{
1858	int work_done;
1859	struct qlcnic_host_tx_ring *tx_ring;
1860	struct qlcnic_adapter *adapter;
1861
1862	budget = QLCNIC_TX_POLL_BUDGET;
1863	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
1864	adapter = tx_ring->adapter;
1865	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1866	if (work_done) {
1867		napi_complete(&tx_ring->napi);
1868		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1869			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
1870	}
1871
1872	return work_done;
1873}
1874
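/*
 * RX-only NAPI handler used when TX interrupts are delivered on dedicated
 * MSI-X vectors and therefore handled by qlcnic_83xx_msix_tx_poll().
 */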
1875static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
1876{
1877	int work_done;
1878	struct qlcnic_host_sds_ring *sds_ring;
1879	struct qlcnic_adapter *adapter;
1880
1881	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1882	adapter = sds_ring->adapter;
1883	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1884	if (work_done < budget) {
1885		napi_complete(&sds_ring->napi);
1886		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1887			qlcnic_83xx_enable_intr(adapter, sds_ring);
1888	}
1889
1890	return work_done;
1891}
1892
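/*
 * Enable NAPI and unmask interrupts on every SDS ring, and on the
 * dedicated TX rings when MSI-X is in use and TX interrupts are not
 * shared with RX.
 */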
1893void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
1894{
1895	int ring;
1896	struct qlcnic_host_sds_ring *sds_ring;
1897	struct qlcnic_host_tx_ring *tx_ring;
1898	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1899
1900	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1901		return;
1902
1903	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1904		sds_ring = &recv_ctx->sds_rings[ring];
1905		napi_enable(&sds_ring->napi);
1906		if (adapter->flags & QLCNIC_MSIX_ENABLED)
1907			qlcnic_83xx_enable_intr(adapter, sds_ring);
1908	}
1909
1910	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1911	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1912		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1913			tx_ring = &adapter->tx_ring[ring];
1914			napi_enable(&tx_ring->napi);
1915			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
1916		}
1917	}
1918}
1919
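/*
 * Mirror of qlcnic_83xx_napi_enable(): mask interrupts, wait for any
 * in-flight poll to finish, then disable NAPI on the SDS and TX rings.
 */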
1920void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1921{
1922	int ring;
1923	struct qlcnic_host_sds_ring *sds_ring;
1924	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1925	struct qlcnic_host_tx_ring *tx_ring;
1926
1927	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1928		return;
1929
1930	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1931		sds_ring = &recv_ctx->sds_rings[ring];
1932		if (adapter->flags & QLCNIC_MSIX_ENABLED)
1933			qlcnic_83xx_disable_intr(adapter, sds_ring);
1934		napi_synchronize(&sds_ring->napi);
1935		napi_disable(&sds_ring->napi);
1936	}
1937
1938	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1939	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1940		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1941			tx_ring = &adapter->tx_ring[ring];
1942			qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
1943			napi_synchronize(&tx_ring->napi);
1944			napi_disable(&tx_ring->napi);
1945		}
1946	}
1947}
1948
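/*
 * Allocate the SDS and TX ring structures and register the appropriate
 * NAPI poll handler for each ring, depending on whether MSI-X is enabled
 * and whether TX interrupts are shared with RX.
 */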
1949int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1950			 struct net_device *netdev)
1951{
1952	int ring;
1953	struct qlcnic_host_sds_ring *sds_ring;
1954	struct qlcnic_host_tx_ring *tx_ring;
1955	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1956
1957	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
1958		return -ENOMEM;
1959
1960	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1961		sds_ring = &recv_ctx->sds_rings[ring];
1962		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1963			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
1964				netif_napi_add(netdev, &sds_ring->napi,
1965					       qlcnic_83xx_rx_poll,
1966					       NAPI_POLL_WEIGHT);
1967			else
1968				netif_napi_add(netdev, &sds_ring->napi,
1969					       qlcnic_83xx_msix_sriov_vf_poll,
1970					       NAPI_POLL_WEIGHT);
1971
1972		} else {
1973			netif_napi_add(netdev, &sds_ring->napi,
1974				       qlcnic_83xx_poll,
1975				       NAPI_POLL_WEIGHT);
1976		}
1977	}
1978
1979	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
1980		qlcnic_free_sds_rings(recv_ctx);
1981		return -ENOMEM;
1982	}
1983
1984	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1985	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1986		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
1987			tx_ring = &adapter->tx_ring[ring];
1988			netif_napi_add(netdev, &tx_ring->napi,
1989				       qlcnic_83xx_msix_tx_poll,
1990				       NAPI_POLL_WEIGHT);
1991		}
1992	}
1993
1994	return 0;
1995}
1996
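/*
 * Unregister the NAPI instances added by qlcnic_83xx_napi_add() and free
 * the SDS and TX ring structures.
 */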
1997void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
1998{
1999	int ring;
2000	struct qlcnic_host_sds_ring *sds_ring;
2001	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
2002	struct qlcnic_host_tx_ring *tx_ring;
2003
2004	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
2005		sds_ring = &recv_ctx->sds_rings[ring];
2006		netif_napi_del(&sds_ring->napi);
2007	}
2008
2009	qlcnic_free_sds_rings(adapter->recv_ctx);
2010
2011	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
2012	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
2013		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
2014			tx_ring = &adapter->tx_ring[ring];
2015			netif_napi_del(&tx_ring->napi);
2016		}
2017	}
2018
2019	qlcnic_free_tx_rings(adapter);
2020}
2021
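/*
 * Diagnostic (loopback test) receive path: consume one RX buffer, count it
 * if it carries the expected loopback pattern, otherwise dump it for
 * debugging.  The skb is freed rather than passed up the stack.
 */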
2022void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
2023				  int ring, u64 sts_data[])
2024{
2025	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
2026	struct sk_buff *skb;
2027	struct qlcnic_host_rds_ring *rds_ring;
2028	int index, length;
2029
2030	if (unlikely(ring >= adapter->max_rds_rings))
2031		return;
2032
2033	rds_ring = &recv_ctx->rds_rings[ring];
2034	index = qlcnic_83xx_hndl(sts_data[0]);
2035	if (unlikely(index >= rds_ring->num_desc))
2036		return;
2037
2038	length = qlcnic_83xx_pktln(sts_data[0]);
2039
2040	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
2041	if (!skb)
2042		return;
2043
2044	if (length > rds_ring->skb_size)
2045		skb_put(skb, rds_ring->skb_size);
2046	else
2047		skb_put(skb, length);
2048
2049	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
2050		adapter->ahw->diag_cnt++;
2051	else
2052		dump_skb(skb, adapter);
2053
2054	dev_kfree_skb_any(skb);
2055	return;
2056}
2057
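/*
 * Poll a single status descriptor while in diagnostic mode: if the
 * adapter has posted a completion, hand it to the diagnostic receive
 * handler, give the descriptor back to the adapter and advance the
 * consumer index.
 */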
2058void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
2059{
2060	struct qlcnic_adapter *adapter = sds_ring->adapter;
2061	struct status_desc *desc;
2062	u64 sts_data[2];
2063	int ring, opcode;
2064	u32 consumer = sds_ring->consumer;
2065
2066	desc = &sds_ring->desc_head[consumer];
2067	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
2068	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
2069	opcode = qlcnic_83xx_opcode(sts_data[1]);
2070	if (!opcode)
2071		return;
2072
2073	ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
2074	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
2075	desc = &sds_ring->desc_head[consumer];
2076	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
2077	consumer = get_next_index(consumer, sds_ring->num_desc);
2078	sds_ring->consumer = consumer;
2079	writel(consumer, sds_ring->crb_sts_consumer);
2080}
2081