Lines matching refs:txq

179 	return container_of(q, struct sge_qset, txq[qidx]);
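
The container_of() at line 179 recovers the enclosing queue set from a pointer to one of its embedded TX queues. A minimal user-space sketch of the same idiom follows; the struct layouts and the wrapper name txq_to_qset are illustrative stand-ins, not the driver's definitions, and the variable array index inside offsetof() relies on the GCC/Clang builtin, exactly as the kernel's container_of() does.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins; the real driver structs carry many more fields. */
struct sge_txq {
	unsigned int cntxt_id;
};

struct sge_qset {
	int id;
	struct sge_txq txq[3];	/* one ETH, OFLD and CTRL queue per set */
};

/* container_of(): step back from a pointer to an embedded member to the
 * structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as the matched line: given &qs->txq[qidx], recover qs. */
static struct sge_qset *txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

int main(void)
{
	struct sge_qset qs = { .id = 7 };

	printf("recovered qset id = %d\n", txq_to_qset(&qs.txq[2], 2)->id);
	return 0;
}

This is why so many of the matched lines can work from a bare struct sge_txq pointer: the queue-set context is always one pointer subtraction away.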
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
691 if (q->txq[i].desc) {
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
695 if (q->txq[i].sdesc) {
696 free_tx_desc(adapter, &q->txq[i],
697 q->txq[i].in_use);
698 kfree(q->txq[i].sdesc);
701 q->txq[i].size *
703 q->txq[i].desc, q->txq[i].phys_addr);
704 __skb_queue_purge(&q->txq[i].sendq);
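
Lines 691-704 are fragments of one per-queue teardown block. Consolidated into a readable sketch below; the surrounding loop and any locking around the context disable are omitted, and dma_free_coherent() plus struct tx_desc are assumptions standing in for whatever free call and ring element type the driver actually uses at lines 701-703.

if (q->txq[i].desc) {
	/* Stop the hardware from fetching from this egress context. */
	t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);

	if (q->txq[i].sdesc) {
		/* Unmap and free packets still owned by the ring, then the
		 * parallel software-state array itself. */
		free_tx_desc(adapter, &q->txq[i], q->txq[i].in_use);
		kfree(q->txq[i].sdesc);
	}

	/* Release the DMA descriptor ring ... */
	dma_free_coherent(&adapter->pdev->dev,
			  q->txq[i].size * sizeof(struct tx_desc),
			  q->txq[i].desc, q->txq[i].phys_addr);

	/* ... and drop any skbs still parked on the software backlog. */
	__skb_queue_purge(&q->txq[i].sendq);
}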
731 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
732 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
733 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
734 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
735 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1206 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1209 netif_tx_stop_queue(txq);
1227 struct netdev_queue *txq;
1242 q = &qs->txq[TXQ_ETH];
1243 txq = netdev_get_tx_queue(dev, qidx);
1251 t3_stop_tx_queue(txq, qs, q);
1260 t3_stop_tx_queue(txq, qs, q);
1265 netif_tx_start_queue(txq);
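
Lines 1206-1265 show the Ethernet transmit path stopping the netdev queue when descriptor headroom runs low and then re-checking before returning. A single-threaded user-space model of that stop/re-check pattern follows; the field names mirror the driver, but the thresholds and the should_restart_tx() policy here are simplified stand-ins, not the driver's formulas.

#include <stdbool.h>
#include <stdio.h>

struct txq_model {
	unsigned int size;	 /* ring size in descriptors                  */
	unsigned int in_use;	 /* descriptors currently handed to hardware  */
	unsigned int stop_thres; /* stop the netdev queue below this headroom */
	bool stopped;		 /* models the netif_tx_*_queue() state       */
};

static bool should_restart_tx(const struct txq_model *q)
{
	/* Simplified policy: restart once comfortable headroom is back. */
	return q->size - q->in_use >= 2 * q->stop_thres;
}

static int xmit_one(struct txq_model *q, unsigned int ndesc)
{
	if (q->in_use + ndesc > q->size)
		return -1;		/* ring full: NETDEV_TX_BUSY territory */

	q->in_use += ndesc;

	if (q->size - q->in_use < q->stop_thres) {
		q->stopped = true;	/* t3_stop_tx_queue() in the driver */

		/* Re-check after stopping: a completion may have reclaimed
		 * descriptors between the check and the stop, and a stopped
		 * queue with free space would otherwise never be woken. */
		if (should_restart_tx(q))
			q->stopped = false;	/* netif_tx_start_queue() */
	}
	return 0;
}

int main(void)
{
	struct txq_model q = { .size = 16, .stop_thres = 4 };

	xmit_one(&q, 10);	/* plenty of headroom left, queue stays awake */
	xmit_one(&q, 3);	/* headroom drops below stop_thres: stop      */
	printf("stopped=%d in_use=%u\n", q.stopped, q.in_use);
	return 0;
}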
1476 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1516 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1687 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1773 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1775 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
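
The control and offload transmit paths (lines 1476, 1516, 1687, 1773-1775), together with the sendq and qresume_tsk fields seen elsewhere in this listing, suggest the usual ring-plus-backlog pattern: when the ring is full the packet is parked on the queue's sendq rather than dropped, and the qresume tasklet drains that backlog once credits return. A toy, single-threaded model of that decision; sizes and names are illustrative, not the driver's.

#include <stdio.h>

#define RING_SIZE    4
#define BACKLOG_SIZE 8

struct toy_txq {
	unsigned int in_use;		/* descriptors currently in the ring */
	int backlog[BACKLOG_SIZE];	/* overflow FIFO, like ->sendq       */
	unsigned int bl_head, bl_len;
};

/* Transmit path: use a ring slot if one is free, otherwise park the message
 * on the backlog (the driver queues the skb on ->sendq instead). */
static void xmit(struct toy_txq *q, int msg)
{
	if (q->in_use < RING_SIZE) {
		q->in_use++;			/* message goes straight out */
	} else if (q->bl_len < BACKLOG_SIZE) {
		q->backlog[(q->bl_head + q->bl_len) % BACKLOG_SIZE] = msg;
		q->bl_len++;
	}
}

/* Resume path: returned credits free ring slots, so drain the backlog in
 * FIFO order (the qresume tasklets set up at lines 3022-3024 do this). */
static void resume(struct toy_txq *q, unsigned int credits)
{
	q->in_use -= credits;
	while (q->bl_len && q->in_use < RING_SIZE) {
		q->bl_head = (q->bl_head + 1) % BACKLOG_SIZE;
		q->bl_len--;
		q->in_use++;
	}
}

int main(void)
{
	struct toy_txq q = { 0 };

	for (int i = 0; i < 6; i++)
		xmit(&q, i);		/* 4 fill the ring, 2 are backlogged */
	printf("in ring: %u, backlogged: %u\n", q.in_use, q.bl_len);

	resume(&q, 2);			/* 2 credits return; backlog drains  */
	printf("in ring: %u, backlogged: %u\n", q.in_use, q.bl_len);
	return 0;
}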
1922 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1924 qs->txq[TXQ_ETH].restarts++;
1930 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1932 qs->txq[TXQ_OFLD].restarts++;
1933 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1936 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1938 qs->txq[TXQ_CTRL].restarts++;
1939 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
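
In the restart logic at lines 1922-1939, several paths may notice that a stopped queue has room again, but a test_and_clear_bit() on the per-queue stopped bit lets exactly one of them do the restart work (bump ->restarts, schedule the qresume tasklet or wake the netdev queue). A small runnable model of that single-waker guard; atomic_exchange() plays the role of test_and_clear_bit(), and the caller names are made up.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool txq_stopped = true;
static unsigned int restarts;

static void try_restart(const char *who)
{
	/* Only the caller that actually flips the bit performs the restart. */
	if (atomic_exchange(&txq_stopped, false)) {
		restarts++;
		printf("%s restarted the queue (restarts=%u)\n", who, restarts);
	} else {
		printf("%s: someone else already restarted it\n", who);
	}
}

int main(void)
{
	try_restart("completion handler");
	try_restart("tx timer");
	return 0;
}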
2155 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2160 qs->txq[TXQ_ETH].processed += credits;
2164 qs->txq[TXQ_CTRL].processed += credits;
2168 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2172 qs->txq[TXQ_OFLD].processed += credits;
2189 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2191 if (txq->cleaned + txq->in_use != txq->processed &&
2192 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2193 set_bit(TXQ_RUNNING, &txq->flags);
2195 V_EGRCNTX(txq->cntxt_id));
2200 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2202 if (txq->cleaned + txq->in_use != txq->processed &&
2203 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2204 set_bit(TXQ_RUNNING, &txq->flags);
2206 V_EGRCNTX(txq->cntxt_id));
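
A toy model of the bookkeeping visible in the two groups above, assuming the usual reading of these counters: in_use is descriptors currently outstanding in the ring, cleaned is descriptors already reclaimed by software, and processed grows with the credit returns handled around lines 2160-2172. Since cleaned + in_use is everything ever submitted, the check at lines 2191/2202 fires exactly when the hardware has not yet caught up and the last doorbell may have been coalesced away.

#include <stdbool.h>
#include <stdio.h>

struct txq_model {
	unsigned int in_use, cleaned, processed;
};

static void submit(struct txq_model *q, unsigned int n)     { q->in_use += n; }
static void hw_credits(struct txq_model *q, unsigned int n) { q->processed += n; }

static void reclaim(struct txq_model *q, unsigned int n)
{
	q->in_use -= n;
	q->cleaned += n;
}

/* Ring the egress doorbell only while there is submitted work the hardware
 * has not acknowledged; in the driver the TXQ_RUNNING and TXQ_LAST_PKT_DB
 * bits keep this from being done redundantly. */
static bool needs_doorbell(const struct txq_model *q)
{
	return q->cleaned + q->in_use != q->processed;
}

int main(void)
{
	struct txq_model q = { 0 };

	submit(&q, 4);
	printf("after submit:          doorbell needed: %d\n", needs_doorbell(&q));

	hw_credits(&q, 4);	/* SGE returns credits for all four descriptors */
	reclaim(&q, 4);
	printf("after credits+reclaim: doorbell needed: %d\n", needs_doorbell(&q));
	return 0;
}

In other words, the doorbell write (V_EGRCNTX(cntxt_id)) is only issued while the counters say the hardware is behind, which keeps GTS/doorbell traffic to a minimum.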
2865 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2870 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2871 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2873 spin_unlock(&qs->txq[TXQ_OFLD].lock);
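
Lines 2865-2873 show the periodic TX timer reclaiming the Ethernet queue unconditionally but touching the offload queue only if spin_trylock() succeeds, so it never stalls behind the transmit path. A user-space analogue (build with -pthread); pthread_mutex_trylock() stands in for the kernel's spin_trylock().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ofld_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int reclaim_passes;

static void tx_timer_tick(void)
{
	if (pthread_mutex_trylock(&ofld_lock) == 0) {
		/* Lock acquired cheaply: reclaim completed descriptors
		 * (reclaim_completed_tx() in the driver). */
		reclaim_passes++;
		pthread_mutex_unlock(&ofld_lock);
	}
	/* Otherwise skip this tick; the next timer run will retry. */
}

int main(void)
{
	tx_timer_tick();
	printf("reclaim passes: %u\n", reclaim_passes);
	return 0;
}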
3009 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3011 &q->txq[i].phys_addr,
3012 &q->txq[i].sdesc);
3013 if (!q->txq[i].desc)
3016 q->txq[i].gen = 1;
3017 q->txq[i].size = p->txq_size[i];
3018 spin_lock_init(&q->txq[i].lock);
3019 skb_queue_head_init(&q->txq[i].sendq);
3022 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
3024 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
3036 q->txq[TXQ_ETH].stop_thres = nports *
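
Lines 3009-3036 set up each TX queue: alloc_ring() provides the DMA descriptor ring plus a parallel software-state array and reports the bus address, after which gen, size, the per-queue lock, the sendq backlog, the resume tasklets and the ETH stop threshold are initialised. A hedged sketch (not the driver's alloc_ring(), and assuming a kernel build environment) of what such a helper has to provide:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *alloc_tx_ring(struct device *dev, size_t nelem, size_t elem_size,
			   size_t sw_size, dma_addr_t *phys, void **sw_ring)
{
	/* Hardware-visible descriptor array, bus address returned via *phys
	 * for the later egress-context setup. */
	void *desc = dma_alloc_coherent(dev, nelem * elem_size, phys, GFP_KERNEL);

	if (!desc)
		return NULL;

	if (sw_size) {
		/* Parallel per-descriptor software state (the sdesc array). */
		*sw_ring = kcalloc(nelem, sw_size, GFP_KERNEL);
		if (!*sw_ring) {
			dma_free_coherent(dev, nelem * elem_size, desc, *phys);
			return NULL;
		}
	}
	/* The caller then sets gen = 1, records size, and initialises the
	 * per-queue lock and sendq, as in lines 3016-3019. */
	return desc;
}

The gen value initialised to 1 at line 3016 is the generation marker that flips each time the ring wraps, so descriptors left over from the previous pass are not mistaken for freshly written ones.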
3077 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3078 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3079 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3085 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3087 q->txq[TXQ_OFLD].phys_addr,
3088 q->txq[TXQ_OFLD].size, 0, 1, 0);
3094 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3096 q->txq[TXQ_CTRL].phys_addr,
3097 q->txq[TXQ_CTRL].size,
3098 q->txq[TXQ_CTRL].token, 1, 0);
3228 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3229 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
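
Lines 3228-3229 kill the deferred-resume tasklets during teardown, before the queues they would touch are freed. A sketch of that quiescing step, assuming the driver's struct sge_qset and TXQ_* definitions are in scope:

#include <linux/interrupt.h>

static void quiesce_tx_resume(struct sge_qset *qs)
{
	/* tasklet_kill() waits for any scheduled or running instance to
	 * finish; after it returns, the caller must ensure nothing schedules
	 * the tasklet again before the rings are freed. */
	tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
	tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
}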