/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/* Ethernet header padding prepended to RX_PKTs */
#define RX_PKT_PAD 2

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
 * attempt to refill it.
 */
#define FL_STARVE_THRES 4

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
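
/*
 * A worst-case TSO packet needs sgl_len(MAX_SKB_FRAGS + 1) + 6 flits (see
 * calc_tx_flits()).  The DIV_ROUND_UP term above is the flits for the SGL
 * entries themselves; the leading "1 +" absorbs the remaining 8 header flits
 * (2 each for the WR, LSO CPL, packet CPL, and the SGL's own header), which
 * always come to exactly one extra 8-flit descriptor.
 */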

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 128

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

enum {
	/* packet alignment in FL buffers */
	FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
	/* egress status entry size */
	STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
};

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.
 */
enum {
	RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
};
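
/*
 * FL buffers are whole pages mapped at offset 0, so their bus addresses are
 * page-aligned and the two low bits are guaranteed to be zero; we borrow
 * them for the flags above.  get_buf_addr() masks the flags back out.
 */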

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

static inline bool fl_starving(const struct sge_fl *fl)
{
	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
}
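
/*
 * Note that fl_starving() deliberately discounts the pend_cred buffers:
 * those have been allocated but their credits haven't been rung to HW yet
 * (see ring_fl_db()), so only avail - pend_cred buffers are usable by HW.
 */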

static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
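
/*
 * map_skb() lays out the mappings as addr[0] for the linear data followed
 * by one entry per page fragment, so callers must provide room for
 * skb_shinfo(skb)->nr_frags + 1 addresses (MAX_SKB_FRAGS + 1 in the worst
 * case, as t4_eth_xmit() does).
 */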

#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);
	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
					bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}

static inline int get_buf_size(const struct rx_sw_desc *d)
{
#if FL_PG_ORDER > 0
	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
					      PAGE_SIZE;
#else
	return PAGE_SIZE;
#endif
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(d), DMA_FROM_DEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(d), DMA_FROM_DEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
			     QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
		q->pend_cred &= 7;
	}
}
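
/*
 * The FL producer index counts in units of 8 buffers (one HW descriptor,
 * see fl_cap()), which is why ring_fl_db() only posts pend_cred / 8 and
 * carries the remainder over in the low 3 bits of pend_cred.
 */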

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];

	gfp |= __GFP_NOWARN | __GFP_COLD;

#if FL_PG_ORDER > 0
	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << FL_PG_ORDER,
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, FL_PG_ORDER);
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}
#endif

	while (n--) {
		pg = alloc_page(gfp);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(q))) {
		smp_wmb();
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
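
/*
 * The arithmetic above follows the ULP_TX_SC_DSGL layout: the first entry
 * lives in the 2-flit SGL header itself (len0/addr0, hence the n--), and
 * the remaining entries are packed as ulptx_sge_pair structs at 3 flits
 * per pair, with a final unpaired entry costing 2 flits (its address plus
 * a half-empty length flit).
 */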

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
}

/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
	if (skb_shinfo(skb)->gso_size)
		flits += 2;
	return flits;
}
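
/*
 * The constant 4 above is the fw_eth_tx_pkt_wr header plus the
 * cpl_tx_pkt_core, 2 flits (16 bytes) each; a GSO packet needs 2 more
 * flits for the LSO controls written ahead of the CPL (see the LSO path
 * in t4_eth_xmit()).
 */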

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*(u64 *)end = 0;
}

/**
 *	ring_tx_db - ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	wmb();            /* write descriptors before telling HW */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     QID(q->cntxt_id) | PIDX(n));
}

/**
 *	inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
			  void *pos)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
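
/*
 * The tail padding above rounds pos up to an 8-byte flit boundary and, if
 * the result is not already 16-byte aligned, writes one zero flit so the
 * inlined WR ends on a 16-byte boundary, matching the padding write_sgl()
 * performs for gathered packets.
 */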

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP))
		return TXPKT_CSUM_TYPE(csum_type) |
			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
	else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
			TXPKT_CSUM_LOC(start + skb->csum_offset);
	}
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/**
 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!is_eth_imm(skb) &&
	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso *lso = (void *)wr;
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(sizeof(*lso)));
		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
					LSO_FIRST_SLICE | LSO_LAST_SLICE |
					LSO_IPV6(v6) |
					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
					LSO_IPHDR_LEN(l3hdr_len / 4) |
					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
		lso->c.ipid_ofst = htons(0);
		lso->c.mss = htons(ssi->gso_size);
		lso->c.seqno_offset = htonl(0);
		lso->c.len = htonl(skb->len);
		cpl = (void *)(lso + 1);
		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			TXPKT_IPHDR_LEN(l3hdr_len) |
			TXPKT_ETHHDR_LEN(eth_xtra_len);
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(len));
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
			q->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
	}

	if (vlan_tx_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
	}

	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (is_eth_imm(skb)) {
		inline_tx_skb(skb, &q->q, cpl + 1);
		dev_kfree_skb(skb);
	} else {
		int last_desc;

		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
			  addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	}

	txq_advance(&q->q, ndesc);

	ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}

/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}

/**
 *	is_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}

/**
 *	ctrlq_check_stop - check if a control queue is full and should stop
 *	@q: the queue
 *	@wr: most recent WR written to the queue
 *
 *	Check if a control queue has become full and should be stopped.
 *	We clean up control queue descriptors very lazily, only when we run out.
 *	If the queue is still full after reclaiming any completed descriptors
 *	we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
		q->q.stops++;
		q->full = 1;
	}
}

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;                  /* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the control queue to restart
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;     /* previously saved */

		/*
		 * Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		spin_unlock(&q->sendq.lock);

		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		written += ndesc;
		txq_advance(&q->q, ndesc);
		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {          /* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb: if (written)
		ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}

/**
 *	t4_mgmt_tx - send a management message
 *	@adap: the adapter
 *	@skb: the packet containing the management message
 *
 *	Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}

/**
 *	is_ofld_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as an offload WR with immediate
 *	data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN;
}

/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits + sgl_len(cnt);
}
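
/*
 * Offload WRs inline everything up to the transport header (hence the
 * skb_transport_offset() / 8 term above) and gather the rest: each page
 * fragment is one SGL entry, plus one more if linear data extends past
 * the transport header (skb->tail != skb->transport_header).
 */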

/**
 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 *	inability to map packets.  A periodic timer attempts to restart
 *	queues so marked.
 */
static void txq_stop_maperr(struct sge_ofld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}

/**
 *	ofldtxq_stop - stop an offload Tx queue that has become full
 *	@q: the queue to stop
 *	@skb: the packet causing the queue to become full
 *
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;

	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
	q->q.stops++;
	q->full = 1;
}

/**
 *	service_ofldq - restart a suspended offload queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its packet queue
 *	to the HW Tx ring.  The function starts and ends with the queue locked.
 */
static void service_ofldq(struct sge_ofld_txq *q)
{
	u64 *pos;
	int credits;
	struct sk_buff *skb;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/*
		 * We drop the lock but leave skb on sendq, thus retaining
		 * exclusive access to the state of the queue.
		 */
		spin_unlock(&q->sendq.lock);

		reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, skb);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			inline_tx_skb(skb, &q->q, pos);
		else if (map_skb(q->adap->pdev_dev, skb,
				 (dma_addr_t *)skb->head)) {
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			memcpy(pos, skb->data, hdr_len);
			write_sgl(skb, &q->q, (void *)pos + hdr_len,
				  pos + flits, hdr_len,
				  (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		ring_tx_db(q->adap, &q->q, written);
}

/**
 *	ofld_xmit - send a packet through an offload queue
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
	spin_lock(&q->sendq.lock);
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);
	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ofldq - restart a suspended offload queue
 *	@data: the offload queue to restart
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(unsigned long data)
{
	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;

	spin_lock(&q->sendq.lock);
	q->full = 0;            /* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}

/**
 *	skb_txq - return the Tx queue an offload packet should use
 *	@skb: the packet
 *
 *	Returns the Tx queue an offload packet should use as indicated by bits
 *	1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Returns whether an offload packet should use an OFLD or a CTRL
 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}

static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	unsigned int idx = skb_txq(skb);

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}

/**
 *	t4_ofld_send - send an offload packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet queue_mapping to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ofld_send(adap, skb);
	local_bh_enable();
	return ret;
}

/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
 *	intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);

static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl, unsigned int offset)
{
	int i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

/**
 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
	 * size, which is expected since buffers are at least PAGE_SIZEd.
	 * In this case packets up to RX_COPY_THRES have only one fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:	return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);

/**
 *	t4_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
static void t4_pktgl_free(const struct pkt_gl *gl)
{
	int n;
	const struct page_frag *p;

	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
		put_page(p->page);
}

/*
 * Process an MPS trace packet.  Give it an unused protocol number so it won't
 * be delivered to anyone and send it to the stack for capture.
 */
static noinline int handle_trace_pkt(struct adapter *adap,
				     const struct pkt_gl *gl)
{
	struct sk_buff *skb;
	struct cpl_trace_pkt *p;

	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		return 0;
	}

	p = (struct cpl_trace_pkt *)skb->data;
	__skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	skb->protocol = htons(0xffff);
	skb->dev = adap->port[0];
	netif_receive_skb(skb);
	return 0;
}

static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt)
{
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	copy_frags(skb, gl, RX_PKT_PAD);
	skb->len = gl->tot_len - RX_PKT_PAD;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);
	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);
	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}

/**
 *	t4_ethrx_handler - process an ingress ethernet packet
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@si: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	bool csum_ok;
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
		return handle_trace_pkt(q->adap, si);

	pkt = (const struct cpl_rx_pkt *)rsp;
	csum_ok = pkt->csum_calc && !pkt->err_vec;
	if ((pkt->l2info & htonl(RXF_TCP)) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
		do_gro(rxq, si, pkt);
		return 0;
	}

	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(si);
		rxq->stats.rx_drops++;
		return 0;
	}

	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
	skb->protocol = eth_type_trans(skb, q->netdev);
	skb_record_rx_queue(skb, q->idx);
	if (skb->dev->features & NETIF_F_RXHASH)
		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;

	rxq->stats.pkts++;

	if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
		if (!pkt->ip_frag) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			rxq->stats.rx_cso++;
		} else if (pkt->l2info & htonl(RXF_IP)) {
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
			rxq->stats.rx_cso++;
		}
	} else
		skb_checksum_none_assert(skb);

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	netif_receive_skb(skb);
	return 0;
}

/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped, we mark them so to
 *	prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
 */
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
			    int frags)
{
	struct rx_sw_desc *d;

	while (frags--) {
		if (q->cidx == 0)
			q->cidx = q->size - 1;
		else
			q->cidx--;
		d = &q->sdesc[q->cidx];
		d->page = si->frags[frags].page;
		d->dma_addr |= RX_UNMAPPED_BUF;
		q->avail++;
	}
}

/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return RSPD_GEN(r->type_gen) == q->gen;
}

/**
 *	rspq_next - advance to the next entry in a response queue
 *	@q: the queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
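
/*
 * The generation bit flips each time the queue wraps, so is_new_response()
 * can tell freshly written descriptors from stale ones left over from the
 * previous pass without reading a producer index.
 */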

/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget)
{
	int ret, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	while (likely(budget_left)) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();
		rsp_type = RSPD_TYPE(rc->type_gen);
		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
			struct page_frag *fp;
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			if (len & RSPD_NEWBUF) {
				if (likely(q->offset > 0)) {
					free_rx_bufs(q->adap, &rxq->fl, 1);
					q->offset = 0;
				}
				len = RSPD_LEN(len);
			}
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0, fp = si.frags; ; frags++, fp++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(rsd);
				fp->page = rsd->page;
				fp->offset = q->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(q->adap, &rxq->fl);
			}

			/*
			 * Last buffer remains mapped so explicitly make it
			 * coherent for CPU access.
			 */
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);

			si.va = page_address(si.frags[0].page) +
				si.frags[0].offset;
			prefetch(si.va);

			si.nfrags = frags + 1;
			ret = q->handler(q, q->cur_desc, &si);
			if (likely(ret == 0))
				q->offset += ALIGN(fp->size, FL_ALIGN);
			else
				restore_rx_bufs(&si, &rxq->fl, frags);
		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
	return budget - budget_left;
}

/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
1778 *	a separate handler).
1779 */
1780static int napi_rx_handler(struct napi_struct *napi, int budget)
1781{
1782	unsigned int params;
1783	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1784	int work_done = process_responses(q, budget);
1785
1786	if (likely(work_done < budget)) {
1787		napi_complete(napi);
1788		params = q->next_intr_params;
1789		q->next_intr_params = q->intr_params;
1790	} else
1791		params = QINTR_TIMER_IDX(7);
1792
1793	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1794		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1795	return work_done;
1796}
1797
1798/*
1799 * The MSI-X interrupt handler for an SGE response queue.
1800 */
1801irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1802{
1803	struct sge_rspq *q = cookie;
1804
1805	napi_schedule(&q->napi);
1806	return IRQ_HANDLED;
1807}
1808
1809/*
1810 * Process the indirect interrupt entries in the interrupt queue and kick off
1811 * NAPI for each queue that has generated an entry.
1812 */
1813static unsigned int process_intrq(struct adapter *adap)
1814{
1815	unsigned int credits;
1816	const struct rsp_ctrl *rc;
1817	struct sge_rspq *q = &adap->sge.intrq;
1818
1819	spin_lock(&adap->sge.intrq_lock);
1820	for (credits = 0; ; credits++) {
1821		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1822		if (!is_new_response(rc, q))
1823			break;
1824
1825		rmb();
1826		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1827			unsigned int qid = ntohl(rc->pldbuflen_qid);
1828
1829			qid -= adap->sge.ingr_start;
1830			napi_schedule(&adap->sge.ingr_map[qid]->napi);
1831		}
1832
1833		rspq_next(q);
1834	}
1835
1836	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1837		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1838	spin_unlock(&adap->sge.intrq_lock);
1839	return credits;
1840}
1841
1842/*
1843 * The MSI interrupt handler, which handles data events from SGE response queues
1844 * as well as error and other async events as they all use the same MSI vector.
1845 */
1846static irqreturn_t t4_intr_msi(int irq, void *cookie)
1847{
1848	struct adapter *adap = cookie;
1849
1850	t4_slow_intr_handler(adap);
1851	process_intrq(adap);
1852	return IRQ_HANDLED;
1853}

/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt line.
 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
	/*
	 * Note the bitwise "|": both the slow-path handler and the
	 * interrupt-queue scan must run even when the first finds work.
	 */
	if (t4_slow_intr_handler(adap) | process_intrq(adap))
		return IRQ_HANDLED;
	return IRQ_NONE;             /* probably shared interrupt */
}

/**
 *	t4_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or INTx).
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
	if (adap->flags & USING_MSIX)
		return t4_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return t4_intr_msi;
	return t4_intr_intx;
}
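
/*
 * A minimal usage sketch, not part of this file's logic: the probe-time
 * code in cxgb4_main.c requests the IRQ roughly like this (the exact
 * flags and naming there may differ):
 *
 *	err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 */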
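/*
 * Periodic Rx queue check, run every RX_QCHECK_PERIOD.  Tries to resume
 * NAPI processing for free lists previously marked starving, and watches
 * the SGE ingress DMA debug counters, warning once when a channel appears
 * starved and clearing that state when it recovers.
 */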
static void sge_rx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, cnt[2];
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_clear_bit();

			if (fl_starving(fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}

	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++)
		if (cnt[i] >= s->starve_thres) {
			if (s->idma_state[i] || cnt[i] == 0xffffffff)
				continue;
			s->idma_state[i] = 1;
			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
			dev_warn(adap->pdev_dev,
				 "SGE idma%u starvation detected for queue %lu\n",
				 i, m & 0xffff);
		} else if (s->idma_state[i]) {
			s->idma_state[i] = 0;
		}

	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}

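/*
 * Periodic Tx queue check, run every TX_QCHECK_PERIOD.  Restarts offload
 * Tx queues that were suspended by DMA mapping errors and reclaims up to
 * MAX_TIMER_TX_RECLAIM completed Ethernet Tx descriptors, visiting the
 * Ethernet queues round-robin from where the previous run left off.
 */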
static void sge_tx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, budget;
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_ofld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *q = &s->ethtxq[i];

		if (q->q.in_use &&
		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
		    __netif_tx_trylock(q->txq)) {
			int avail = reclaimable(&q->q);

			if (avail) {
				if (avail > budget)
					avail = budget;

				free_tx_desc(adap, &q->q, avail, true);
				q->q.in_use -= avail;
				budget -= avail;
			}
			__netif_tx_unlock(q->txq);
		}

		if (++i >= s->ethqsets)
			i = 0;
	} while (budget && i != s->ethtxq_rover);
	s->ethtxq_rover = i;
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}

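/**
 *	t4_sge_alloc_rxq - allocate an SGE ingress queue, with optional FL
 *	@adap: the adapter
 *	@iq: the ingress queue to allocate
 *	@fwevtq: true if this is the firmware event queue
 *	@dev: the net device associated with the queue
 *	@intr_idx: the MSI-X vector to use, or, if negative, -(index)-1 of
 *		the ingress queue to which interrupts are forwarded
 *	@fl: the free list backing the queue, or NULL for a pure response queue
 *	@hnd: the handler invoked for each response entry
 *
 *	Allocates host memory for the queue and its optional free list, asks
 *	the firmware to create and start them, and registers the queue in the
 *	ingress map.  A newly created free list is filled with buffers.
 */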
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Size needs to be a multiple of 16, including the status entry. */
	iq->size = roundup(iq->size, 16);

	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
				 FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
						       -intr_idx - 1));
	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
		FW_IQ_CMD_IQGTSMODE |
		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);

	if (fl) {
		fl->size = roundup(fl->size, 8);
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
					    FW_IQ_CMD_FL0FETCHRO(1) |
					    FW_IQ_CMD_FL0DATARO(1) |
					    FW_IQ_CMD_FL0PADEN);
		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
				FW_IQ_CMD_FL0FBMAX(3));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret)
		goto err;

	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->size--;                           /* subtract status entry */
	iq->adap = adap;
	iq->netdev = dev;
	iq->handler = hnd;

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
	}
	return 0;

fl_nomem:
	ret = -ENOMEM;
err:
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
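
/*
 * A hedged example of a typical call, modeled on how the driver brings up
 * its Ethernet queue sets (the names q and msi_idx are illustrative):
 *
 *	err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
 *			       msi_idx, &q->fl, t4_ethrx_handler);
 */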
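/*
 * Set up the software state of a Tx queue that the firmware has just
 * created and register it in the egress queue map.
 */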
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	q->cntxt_id = id;
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}

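/**
 *	t4_sge_alloc_eth_txq - allocate an Ethernet Tx queue
 *	@adap: the adapter
 *	@txq: the queue to allocate
 *	@dev: the net device associated with the queue
 *	@netdevq: the netdev Tx queue backing this SGE queue
 *	@iqid: the ingress queue to which completion notifications are sent
 *
 *	Allocates the descriptor ring and asks the firmware to create and
 *	start the egress queue; on failure all host resources are released.
 */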
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_eth_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
			netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_ETH_CMD_FETCHRO(1) |
				   FW_EQ_ETH_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
				  FW_EQ_ETH_CMD_FBMAX(3) |
				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
				  FW_EQ_ETH_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}

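/**
 *	t4_sge_alloc_ctrl_txq - allocate a control Tx queue
 *	@adap: the adapter
 *	@txq: the queue to allocate
 *	@dev: the net device associated with the queue
 *	@iqid: the ingress queue to which queue notifications are sent
 *	@cmplqid: the ingress queue receiving completion messages
 *
 *	Control queues carry only immediate data, so no software descriptor
 *	state is allocated for them.
 */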
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid)
{
	int ret, nentries;
	struct fw_eq_ctrl_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	/*
	 * Zero the command first, as the sibling allocators do, so no
	 * uninitialized stack data reaches the firmware mailbox.
	 */
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
			    FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_CTRL_CMD_FETCHRO |
				   FW_EQ_CTRL_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
				  FW_EQ_CTRL_CMD_FBMAX(3) |
				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}

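/**
 *	t4_sge_alloc_ofld_txq - allocate an offload Tx queue
 *	@adap: the adapter
 *	@txq: the queue to allocate
 *	@dev: the net device associated with the queue
 *	@iqid: the ingress queue to which completion notifications are sent
 *
 *	Like t4_sge_alloc_eth_txq but for offload (RDMA/iSCSI) traffic;
 *	packets that arrive while the queue is full wait on @txq->sendq.
 */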
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_ofld_cmd c;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
			NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
			    FW_EQ_OFLD_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_OFLD_CMD_FETCHRO(1) |
				   FW_EQ_OFLD_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
				  FW_EQ_OFLD_CMD_FBMAX(3) |
				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}

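/*
 * Release the host memory of a Tx descriptor ring and reset the software
 * state referring to it.  The firmware egress queue must already be gone.
 */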
static void free_txq(struct adapter *adap, struct sge_txq *q)
{
	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + STAT_LEN,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}

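/*
 * Free an ingress queue and, if present, its associated free list: tell
 * the firmware to release the queues, then free all host-side resources.
 */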
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}

/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq = adap->sge.ethrxq;
	struct sge_eth_txq *etq = adap->sge.ethtxq;
	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq, &eq->fl);
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
				       etq->q.cntxt_id);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up RDMA and iSCSI Rx queues */
	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}
	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}

	/* clean up offload Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

		if (q->q.desc) {
			tasklet_kill(&q->qresume_tsk);
			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
					q->q.cntxt_id);
			free_tx_desc(adap, &q->q, q->q.in_use, false);
			kfree(q->q.sdesc);
			__skb_queue_purge(&q->sendq);
			free_txq(adap, &q->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc)
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}

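/**
 *	t4_sge_start - enable SGE operation
 *	@adap: the adapter
 *
 *	Starts the periodic Rx and Tx queue check timers.
 */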
void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}

/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())  /* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
		struct sge_ofld_txq *q = &s->ofldtxq[i];

		if (q->q.desc)
			tasklet_kill(&q->qresume_tsk);
	}
	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}

/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Performs the SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queues here; instead, the driver's
 *	top level must request them individually.
 */
void t4_sge_init(struct adapter *adap)
{
	unsigned int i, v;
	struct sge *s = &adap->sge;
	unsigned int fl_align_log = ilog2(FL_ALIGN);

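	/*
	 * The hardware encodes the ingress padding boundary as 32 << X
	 * bytes, so fl_align_log - 5 below selects an FL_ALIGN-byte
	 * boundary.
	 */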
	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
			 RXPKTCPLMODE |
			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));

	for (i = v = 0; i < 32; i += 4)
		v |= (PAGE_SHIFT - 10) << i;
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
#if FL_PG_ORDER > 0
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
#endif
	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
		     THRESHOLD_0(s->counter_val[0]) |
		     THRESHOLD_1(s->counter_val[1]) |
		     THRESHOLD_2(s->counter_val[2]) |
		     THRESHOLD_3(s->counter_val[3]));
	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
	s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
	s->idma_state[0] = s->idma_state[1] = 0;
	spin_lock_init(&s->intrq_lock);
}
