Lines matching refs:fl — free-list (struct sge_fl) references, apparently from the Chelsio cxgb3 driver's sge.c

169 	return container_of(q, struct sge_qset, fl[qidx]);
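
Line 169 walks back from a free-list pointer to its owning queue set. A sketch of the helper it lives in; the name fl_to_qset is an assumption, the body is the match itself:

	static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
	{
		/* fl[] is embedded in struct sge_qset, so container_of()
		 * recovers the enclosing qset from element qidx. */
		return container_of(q, struct sge_qset, fl[qidx]);
	}
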
558 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
560 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
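
Lines 558-560 show the refill helper topping a list back up: it posts only as many buffers as the list is short (size - credits), capped at MAX_RX_REFILL per call. A hedged reconstruction of the full inline, assuming refill_fl() takes a gfp_t and this wrapper runs in atomic (softirq/timer) context:

	static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
	{
		/* credits = buffers currently posted to hardware, so
		 * size - credits is the shortfall; cap the batch at
		 * MAX_RX_REFILL to bound time spent in this path. */
		refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
			  GFP_ATOMIC);
	}
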
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
678 if (q->fl[i].desc) {
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
682 free_rx_bufs(pdev, &q->fl[i]);
683 kfree(q->fl[i].sdesc);
685 q->fl[i].size *
686 sizeof(struct rx_desc), q->fl[i].desc,
687 q->fl[i].phys_addr);
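
Lines 653 and 678-687 belong to qset teardown: each of the SGE_RXQ_PER_SET free lists is disabled in hardware, its posted buffers and software descriptor array are freed, and the DMA ring is returned. Assembled into one hedged sketch; pci_free_consistent() is inferred from the size/desc/phys_addr argument pattern at 685-687, and the real driver presumably serializes the disable against other context operations:

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			pci_free_consistent(pdev,
					    q->fl[i].size *
					    sizeof(struct rx_desc),
					    q->fl[i].desc,
					    q->fl[i].phys_addr);
		}
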
729 qs->fl[0].cntxt_id = 2 * id;
730 qs->fl[1].cntxt_id = 2 * id + 1;
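
Lines 729-730 pin down the numbering convention between queue sets and hardware free-list contexts: qset id owns contexts 2*id and 2*id+1, so the mapping is computable in both directions without a lookup table:

	/* qset 0 -> FL contexts 0,1; qset 1 -> 2,3; ... and a given
	 * FL context c belongs to qset c / 2, list c % 2. */
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
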
767 * @fl: the SGE free list holding the packet
779 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
783 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
786 fl->credits--;
802 recycle_rx_buf(adap, fl, fl->cidx);
806 if (unlikely(fl->credits < drop_thres) &&
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
813 fl->buf_size, PCI_DMA_FROMDEVICE);
816 __refill_fl(adap, fl);
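
Lines 767-816 are the skb-mode receive path, get_packet(). Three outcomes show in the matches: a small packet is copied into a fresh skb and its buffer recycled (802); with credits below drop_thres and an emergency refill failing, the buffer is recycled unconsumed, i.e. the packet is dropped (806-807); otherwise the buffer is unmapped (813), handed to the stack, and a normal refill follows (816). A condensed, hedged sketch of that control flow; SGE_RX_COPY_THRES, the sd->dma_addr bookkeeping, and the omitted DMA syncs around the copy are assumptions/simplifications:

	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
	struct sk_buff *skb = NULL;

	fl->credits--;
	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb)	/* copy out, keep the DMA buffer posted */
			memcpy(__skb_put(skb, len), sd->skb->data, len);
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	/* Low on credits: only consume this buffer if an emergency
	 * refill can replace it; otherwise recycle (drops the packet). */
	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC) == 0)
		goto recycle;

	pci_unmap_single(adap->pdev, sd->dma_addr,
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
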
823 * @fl: the SGE free list holding the packet
838 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
843 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
861 fl->credits--;
862 recycle_rx_buf(adap, fl, fl->cidx);
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
884 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
887 fl->alloc_size,
907 fl->credits--;
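
Lines 823-907 are the page-chunk counterpart, get_packet_pg(). The shape mirrors get_packet(): copy-and-recycle for small frames, outright recycling when a recycle is requested or credits are at the threshold (861-862, gated at 867), or consumption of the chunk, where a per-page reference count decides when the whole page can be unmapped (884-887). A hedged sketch of the consume path; the p_cnt decrement and the mapping field are inferred from the matched test:

	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	/* One DMA mapping covers a whole (possibly high-order) page
	 * that is carved into chunks; *p_cnt counts chunks still in
	 * flight. Unmap only on last release, and never while the
	 * free list is still carving this same page. */
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev, sd->pg_chunk.mapping,
			       fl->alloc_size, PCI_DMA_FROMDEVICE);
	fl->credits--;
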
2056 * @fl: the free list containing the page chunk to add
2064 struct sge_fl *fl, int len, int complete)
2066 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2079 fl->credits--;
2083 fl->buf_size - SGE_PG_RSVD,
2087 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2090 fl->alloc_size,
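
Lines 2056-2090 come from the LRO path (lro_add_page(), per the @fl kernel-doc at 2056), which attaches a received chunk to an aggregated skb as a page fragment instead of building a new skb. Line 2083 is the matching DMA sync, and it covers only buf_size - SGE_PG_RSVD bytes: the reserved head of the buffer holds the page reference count, not packet data. 2087/2090 then repeat the last-reference unmap idiom of 884/887 verbatim. A hedged sketch of the sync; the unmap-addr accessor is an assumption:

	/* Make the chunk's payload visible to the CPU before the LRO
	 * code reads it; skip the SGE_PG_RSVD bookkeeping bytes. */
	pci_dma_sync_single_for_cpu(adap->pdev,
				    pci_unmap_addr(sd, dma_addr),
				    fl->buf_size - SGE_PG_RSVD,
				    PCI_DMA_FROMDEVICE);
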
2302 struct sge_fl *fl;
2306 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2307 if (fl->use_pages) {
2308 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2314 __refill_fl(adap, fl);
2316 lro_add_page(adap, qs, fl,
2322 skb = get_packet_pg(adap, fl, q,
2328 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2337 if (++fl->cidx == fl->size)
2338 fl->cidx = 0;
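
Lines 2302-2338 sit in the response-queue handler. Each RX response carries the F_RSPD_FLQ flag naming which of the qset's two free lists supplied the buffer; the handler branches on that list's mode (page chunks feed LRO or get_packet_pg(), plain buffers go through get_packet()) and finally consumes one descriptor by advancing cidx with wrap-around. The skeleton, heavily condensed; the lro/eop predicates and the drop threshold are assumptions:

	struct sge_fl *fl;

	fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
	if (fl->use_pages) {
		prefetch(fl->sdesc[fl->cidx].pg_chunk.va);
		__refill_fl(adap, fl);
		if (lro)
			lro_add_page(adap, qs, fl, G_RSPD_LEN(len), eop);
		else
			skb = get_packet_pg(adap, fl, q, G_RSPD_LEN(len),
					    SGE_RX_DROP_THRES);
	} else
		skb = get_packet(adap, fl, G_RSPD_LEN(len),
				 SGE_RX_DROP_THRES);

	/* one buffer consumed: bump the ring's consumer index */
	if (++fl->cidx == fl->size)
		fl->cidx = 0;
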
2928 if (qs->fl[0].credits < qs->fl[0].size)
2929 __refill_fl(adap, &qs->fl[0]);
2930 if (qs->fl[1].credits < qs->fl[1].size)
2931 __refill_fl(adap, &qs->fl[1]);
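
Lines 2928-2931 are the periodic backstop: an SGE timer retries refills that failed with GFP_ATOMIC in the hot path, whenever a list holds fewer credits than descriptors. The bookkeeping these checks rely on is simple enough to model standalone; a self-contained illustration in plain C (the producer index pidx is an assumption, it does not appear in the matches):

	#include <assert.h>

	struct fl_model {
		unsigned int size;	/* ring descriptors */
		unsigned int credits;	/* posted, not yet consumed */
		unsigned int cidx;	/* consumer index */
		unsigned int pidx;	/* producer index (assumed) */
	};

	/* Consume one buffer, as process_responses() does at 2337-2338. */
	static void fl_consume(struct fl_model *fl)
	{
		assert(fl->credits > 0);
		fl->credits--;
		if (++fl->cidx == fl->size)
			fl->cidx = 0;
	}

	/* Post up to n buffers, as refill_fl() does; returns the count
	 * actually posted. Afterwards credits == buffers in flight. */
	static unsigned int fl_refill(struct fl_model *fl, unsigned int n)
	{
		unsigned int posted = 0;

		while (posted < n && fl->credits < fl->size) {
			if (++fl->pidx == fl->size)
				fl->pidx = 0;
			fl->credits++;
			posted++;
		}
		return posted;
	}
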
2982 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2985 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2986 if (!q->fl[0].desc)
2989 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2992 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2993 if (!q->fl[1].desc)
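
Lines 2982-2993 allocate the two descriptor rings during qset setup: fl[0] sized for standard buffers (p->fl_size), fl[1] for jumbo buffers (p->jumbo_size), each with a parallel software-descriptor array returned through the sdesc pointer. The middle arguments fell outside the match; judging from the teardown at 685-686 they are the hardware and software descriptor sizes, so a hedged reconstruction of the first call reads:

	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;	/* error path assumed */
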
3027 q->fl[0].gen = q->fl[1].gen = 1;
3028 q->fl[0].size = p->fl_size;
3029 q->fl[1].size = p->jumbo_size;
3040 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3042 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3045 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3047 q->fl[1].buf_size = is_offload(adapter) ?
3052 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3053 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3054 q->fl[0].order = FL0_PG_ORDER;
3055 q->fl[1].order = FL1_PG_ORDER;
3056 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3057 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
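
Lines 3027-3057 set the per-list parameters. The matched pairs at 3040/3042 and 3045/3047 read as the two arms of compile-time #if blocks: with FLn_PG_CHUNK_SIZE nonzero a list hands out page chunks of that size, otherwise it falls back to per-buffer skbs (a small buffer plus the cpl_rx_data header for fl[0]; a larger, offload-dependent size for fl[1] whose exact expressions the search did not capture, so the two names below are placeholders). In sketch form:

	q->fl[0].gen = q->fl[1].gen = 1;	/* generation bit starts at 1 */
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	#if FL0_PG_CHUNK_SIZE > 0
	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
	#else
	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
	#endif
	#if FL1_PG_CHUNK_SIZE > 0
	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
	#else
	q->fl[1].buf_size = is_offload(adapter) ?
		JUMBO_OFLD_BUF_SIZE :	/* placeholder, elided by match */
		JUMBO_NIC_BUF_SIZE;	/* placeholder, elided by match */
	#endif

	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
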
3064 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3069 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3070 q->fl[i].phys_addr, q->fl[i].size,
3071 q->fl[i].buf_size - SGE_PG_RSVD,
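
Lines 3064-3071 program the hardware contexts. 3064 looks like the tail of the response-queue context setup, which is handed fl[0]'s buffer size (presumably as a free-list threshold); 3069-3071 sit in a loop registering each free list itself. Both pass buf_size - SGE_PG_RSVD, hiding the reserved bookkeeping bytes at the head of every buffer from the hardware. A hedged sketch of the loop; the congestion-threshold argument, the trailing flags, and the error label are assumptions:

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size - SGE_PG_RSVD,
					  p->cong_thres, 1, 0);
		if (ret)
			goto err_unlock;
	}
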
3110 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3116 if (avail < q->fl[0].size)
3120 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3122 if (avail < q->fl[1].size)
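
Finally, lines 3110-3122 fill both lists to capacity before the qset goes live, with GFP_KERNEL this time since setup may sleep. An entirely empty list would be fatal, while a partial fill is tolerated but worth reporting; a hedged sketch, with the GFP flags, warning macro, and messages assumed:

	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail)
		goto err;	/* an empty FL0 would stall RX outright */
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
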