Searched refs:tq (Results 1 - 11 of 11) sorted by relevance

/drivers/net/wireless/ath/ath5k/qcu.c
    290  struct ath5k_txq_info *tq = &ah->ah_txq[queue];  (local)
    296  (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
    330  struct ath5k_txq_info *tq = &ah->ah_txq[queue];  (local)
    334  tq = &ah->ah_txq[queue];
    339  (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
    347  AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
    348  AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
    349  AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
    372  if (tq->tqi_cbr_period) {
    373  ath5k_hw_reg_write(ah, AR5K_REG_SM(tq
    [all...]
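The ath5k hits above fill a queue's local-IFS register by shifting cw_min, cw_max and aifs into their bit fields with AR5K_REG_SM(). A minimal standalone sketch of that shift-into-field pattern, assuming made-up field offsets and masks (REG_SM and the IFS_* definitions below are illustrative, not the real AR5K register layout):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: low 10 bits cw_min, next 10 bits cw_max,
 * then 4 bits AIFS.  The real AR5K_DCU_LCL_IFS_* values differ. */
#define IFS_CW_MIN_S  0
#define IFS_CW_MIN_M  0x000003ff
#define IFS_CW_MAX_S  10
#define IFS_CW_MAX_M  0x000ffc00
#define IFS_AIFS_S    20
#define IFS_AIFS_M    0x00f00000

/* Shift a value into its register field, in the spirit of AR5K_REG_SM(). */
#define REG_SM(val, field)  (((uint32_t)(val) << field##_S) & field##_M)

int main(void)
{
    uint32_t cw_min = 15, cw_max = 1023, aifs = 2;

    uint32_t lcl_ifs = REG_SM(cw_min, IFS_CW_MIN) |
                       REG_SM(cw_max, IFS_CW_MAX) |
                       REG_SM(aifs, IFS_AIFS);

    printf("local IFS register word: 0x%08x\n", (unsigned)lcl_ifs);
    return 0;
}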
/drivers/net/vmxnet3/vmxnet3_drv.c
    107  vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    109  return tq->stopped;
    114  vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    116  tq->stopped = false;
    117  netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
    122  vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    124  tq->stopped = false;
    125  netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
    130  vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    132  tq
    336  vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter)  (argument)
    371  vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    402  vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    435  vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    473  vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    503  vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)  (argument)
    665  vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, struct vmxnet3_adapter *adapter)  (argument)
    808  vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_tx_ctx *ctx, struct vmxnet3_adapter *adapter)  (argument)
    904  vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter, struct net_device *netdev)  (argument)
   1600  struct vmxnet3_tx_queue *tq =  (local)
   1625  struct vmxnet3_tx_queue *tq = data;  (local)
   2108  struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];  (local)
   2473  struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];  (local)
    [all...]
/drivers/net/vmxnet3/vmxnet3_int.h
    371  #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
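The vmxnet3 hits above are thin per-queue helpers: each TX queue carries a stopped flag, and the subqueue index handed to netif_start_subqueue()/netif_wake_subqueue() is recovered by pointer arithmetic on the adapter's contiguous tx_queue array. A rough userspace sketch of just that bookkeeping; the struct names and the printf stand-ins for the netif_* calls are illustrative, not the driver's definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the driver structures. */
struct tx_queue { bool stopped; };

struct adapter {
    struct tx_queue tx_queue[4];   /* contiguous array, like the driver's */
};

/* Recover the subqueue index by pointer subtraction, as in
 * netif_start_subqueue(netdev, tq - adapter->tx_queue). */
static size_t tq_index(struct adapter *ad, struct tx_queue *tq)
{
    return (size_t)(tq - ad->tx_queue);
}

static void tq_stop(struct adapter *ad, struct tx_queue *tq)
{
    tq->stopped = true;
    printf("stop subqueue %zu\n", tq_index(ad, tq));
}

static void tq_wake(struct adapter *ad, struct tx_queue *tq)
{
    tq->stopped = false;
    printf("wake subqueue %zu\n", tq_index(ad, tq));
}

int main(void)
{
    struct adapter ad = { 0 };

    tq_stop(&ad, &ad.tx_queue[2]);   /* prints "stop subqueue 2" */
    tq_wake(&ad, &ad.tx_queue[2]);   /* prints "wake subqueue 2" */
    return 0;
}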
/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
    242  * @tq: the TX queue
    246  static inline unsigned int txq_avail(const struct sge_txq *tq)  (argument)
    248  return tq->size - 1 - tq->in_use;
    316  const struct ulptx_sgl *sgl, const struct sge_txq *tq)
    335  if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
    342  } else if ((u8 *)p == (u8 *)tq->stat) {
    343  p = (const struct ulptx_sge_pair *)tq->desc;
    345  } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
    346  const __be64 *addr = (const __be64 *)tq
    315  unmap_sgl(struct device *dev, const struct sk_buff *skb, const struct ulptx_sgl *sgl, const struct sge_txq *tq)  (argument)
    386  free_tx_desc(struct adapter *adapter, struct sge_txq *tq, unsigned int n, bool unmap)  (argument)
    420  reclaimable(const struct sge_txq *tq)  (argument)
    439  reclaim_completed_tx(struct adapter *adapter, struct sge_txq *tq, bool unmap)  (argument)
    885  write_sgl(const struct sk_buff *skb, struct sge_txq *tq, struct ulptx_sgl *sgl, u64 *end, unsigned int start, const dma_addr_t *addr)  (argument)
    948  ring_tx_db(struct adapter *adapter, struct sge_txq *tq, int n)  (argument)
    972  inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, void *pos)  (argument)
   1057  txq_advance(struct sge_txq *tq, unsigned int n)  (argument)
   1314  struct sge_txq *tq = &txq->q;  (local)
   2296  free_txq(struct adapter *adapter, struct sge_txq *tq)  (argument)
    [all...]
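txq_avail() at line 248 reports free descriptors as size - 1 - in_use; keeping one slot permanently unused is a common way to tell a completely full ring from an empty one, and txq_advance() moves the producer index forward with wraparound. A small self-contained sketch of that ring bookkeeping, with a stripped-down stand-in for struct sge_txq:

#include <stdio.h>

/* Simplified TX descriptor ring bookkeeping; only the fields the
 * txq_avail()/txq_advance()-style helpers need. */
struct txq {
    unsigned int size;     /* total descriptors in the ring        */
    unsigned int in_use;   /* descriptors currently outstanding    */
    unsigned int cidx;     /* consumer index                       */
    unsigned int pidx;     /* producer index                       */
};

/* Free descriptors; one slot is deliberately left unused. */
static unsigned int txq_avail(const struct txq *q)
{
    return q->size - 1 - q->in_use;
}

/* Advance the producer index by n descriptors, wrapping at the end of
 * the ring, in the spirit of txq_advance(). */
static void txq_advance(struct txq *q, unsigned int n)
{
    q->in_use += n;
    q->pidx += n;
    if (q->pidx >= q->size)
        q->pidx -= q->size;
}

int main(void)
{
    struct txq q = { .size = 1024 };

    txq_advance(&q, 48);
    printf("in_use=%u avail=%u pidx=%u\n", q.in_use, txq_avail(&q), q.pidx);
    return 0;
}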
/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
    424  struct sge_txq *tq;  (local)
    441  tq = s->egr_map[eq_idx];
    442  if (unlikely(tq == NULL)) {
    447  txq = container_of(tq, struct sge_eth_txq, q);
    448  if (unlikely(tq->abs_id != qid)) {
    451  qid, tq->abs_id);
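Here the generic struct sge_txq looked up in the egress map is converted back to its enclosing Ethernet TX queue with container_of(). A userspace sketch of that embedded-struct recovery; the eth_txq type, its fields and the simplified container_of macro below are illustrative stand-ins:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(): recover the
 * address of the enclosing structure from a pointer to one member. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct sge_txq { unsigned int abs_id; };

/* Hypothetical enclosing queue type; the real sge_eth_txq has more state. */
struct eth_txq {
    struct sge_txq q;
    unsigned long  tx_packets;
};

int main(void)
{
    struct eth_txq txq = { .q = { .abs_id = 17 } };
    struct sge_txq *tq = &txq.q;            /* what the egress map stores */

    /* Go from the embedded generic queue back to its container, as in
     * container_of(tq, struct sge_eth_txq, q). */
    struct eth_txq *owner = container_of(tq, struct eth_txq, q);
    owner->tx_packets++;

    printf("abs_id=%u tx_packets=%lu\n", owner->q.abs_id, owner->tx_packets);
    return 0;
}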
/drivers/net/ifb.c
     51  struct sk_buff_head tq;  (member in struct ifb_private)
     71  if ((skb = skb_peek(&dp->tq)) == NULL) {
     73  skb_queue_splice_tail_init(&dp->rq, &dp->tq);
     81  while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
     98  if (skb_queue_len(&dp->tq) != 0)
    226  __skb_queue_purge(&dp->tq);
    236  __skb_queue_head_init(&dp->tq);
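ifb keeps two sk_buff queues: packets arrive on rq, are spliced wholesale onto tq with skb_queue_splice_tail_init(), and are then drained one by one from tq. A toy sketch of that splice-then-drain pattern with a plain linked-list queue; pkt and pkt_queue are invented stand-ins for sk_buff and sk_buff_head:

#include <stdio.h>

/* Toy packet queue with head/tail pointers. */
struct pkt { int id; struct pkt *next; };
struct pkt_queue { struct pkt *head, *tail; };

static void enqueue(struct pkt_queue *q, struct pkt *p)
{
    p->next = NULL;
    if (q->tail)
        q->tail->next = p;
    else
        q->head = p;
    q->tail = p;
}

/* Move everything on src to the tail of dst and leave src empty, in the
 * spirit of skb_queue_splice_tail_init(&dp->rq, &dp->tq). */
static void splice_tail_init(struct pkt_queue *src, struct pkt_queue *dst)
{
    if (!src->head)
        return;
    if (dst->tail)
        dst->tail->next = src->head;
    else
        dst->head = src->head;
    dst->tail = src->tail;
    src->head = src->tail = NULL;
}

int main(void)
{
    struct pkt_queue rq = { 0 }, tq = { 0 };
    struct pkt a = { .id = 1 }, b = { .id = 2 };

    enqueue(&rq, &a);
    enqueue(&rq, &b);
    splice_tail_init(&rq, &tq);                     /* rq is now empty */

    for (struct pkt *p = tq.head; p; p = p->next)   /* drain tq */
        printf("tx pkt %d\n", p->id);
    return 0;
}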
/drivers/input/serio/hp_sdc.c
    190  curr = hp_sdc.tq[hp_sdc.rcurr];
    319  curr = hp_sdc.tq[hp_sdc.rcurr];
    381  if (hp_sdc.tq[curridx] != NULL)
    395  if (hp_sdc.tq[curridx] != NULL)
    415  curr = hp_sdc.tq[curridx];
    419  hp_sdc.tq[curridx] = NULL;
    433  hp_sdc.tq[curridx] = NULL;
    576  hp_sdc.tq[curridx] = NULL;
    612  if (hp_sdc.tq[i] == this)
    620  if (hp_sdc.tq[
    [all...]
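hp_sdc.tq[] is a fixed-size table of pending transactions indexed by curridx/rcurr; a slot is occupied while it holds a pointer and released by writing NULL back. A minimal sketch of that slot-table idea; the table size, names and the enqueue helper are invented for illustration:

#include <stddef.h>
#include <stdio.h>

#define NR_SLOTS 8

/* Toy transaction, standing in for the driver's transaction record. */
struct transaction { const char *name; };

/* Fixed table of queued transactions; a NULL slot is free, like
 * hp_sdc.tq[curridx] in the hits above. */
static struct transaction *tq[NR_SLOTS];

/* Queue a transaction into the first free slot; -1 if the table is full. */
static int tq_enqueue(struct transaction *t)
{
    for (int i = 0; i < NR_SLOTS; i++) {
        if (tq[i] == NULL) {
            tq[i] = t;
            return i;
        }
    }
    return -1;
}

/* Process whatever sits at curridx and clear the slot so it can be
 * reused, mirroring hp_sdc.tq[curridx] = NULL. */
static void tq_complete(int curridx)
{
    struct transaction *curr = tq[curridx];

    if (curr) {
        printf("completed %s in slot %d\n", curr->name, curridx);
        tq[curridx] = NULL;
    }
}

int main(void)
{
    struct transaction t = { .name = "keyboard reset" };
    int idx = tq_enqueue(&t);

    if (idx >= 0)
        tq_complete(idx);
    return 0;
}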
/drivers/input/keyboard/sunkbd.c
     80  struct work_struct tq;  (member in struct sunkbd)
    119  schedule_work(&sunkbd->tq);
    226  struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
    269  INIT_WORK(&sunkbd->tq, sunkbd_reinit);
/drivers/input/keyboard/lkkbd.c
    283  struct work_struct tq;  (member in struct lkkbd)
    470  schedule_work(&lk->tq);
    581  struct lkkbd *lk = container_of(work, struct lkkbd, tq);
    636  INIT_WORK(&lk->tq, lkkbd_reinit);
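Both keyboard drivers embed a work_struct named tq in their per-device state, schedule it from the interrupt path, and recover the device in the handler with container_of(). A compact userspace sketch of that deferred-work pattern; the one-field work_struct, INIT_WORK and schedule_work below are stand-ins that run the handler synchronously instead of on a workqueue:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal stand-in for struct work_struct: just a callback pointer. */
struct work_struct { void (*func)(struct work_struct *work); };

static void INIT_WORK(struct work_struct *w, void (*fn)(struct work_struct *))
{
    w->func = fn;
}

/* In the kernel this defers to a workqueue thread; here it simply runs
 * the handler immediately. */
static void schedule_work(struct work_struct *w)
{
    w->func(w);
}

/* Keyboard state with an embedded work item, like struct sunkbd/lkkbd. */
struct kbd {
    const char *name;
    struct work_struct tq;
};

/* Work handler: recover the keyboard from the embedded work item, as in
 * container_of(work, struct sunkbd, tq). */
static void kbd_reinit(struct work_struct *work)
{
    struct kbd *kbd = container_of(work, struct kbd, tq);
    printf("reinitializing %s\n", kbd->name);
}

int main(void)
{
    struct kbd kbd = { .name = "sun type 5" };

    INIT_WORK(&kbd.tq, kbd_reinit);
    schedule_work(&kbd.tq);    /* in the drivers: called from the ISR path */
    return 0;
}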
/drivers/net/can/dev.c
    149  bt->tq = (u32)v64;
    204  brp64 = (u64)priv->clock.freq * (u64)bt->tq;
    232  if (!bt->tq)
    554  if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
    629  if ((!bt.bitrate && !bt.tq) || (bt.bitrate && bt.tq))
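In the CAN core tq is the bit-timing time quantum in nanoseconds: line 204 suggests the bit-rate prescaler is derived as clock.freq * tq scaled down by 1e9 (the real helper additionally rounds and clamps against the controller's brp limits), and line 629 rejects configurations that set both or neither of bitrate and tq. A small sketch of that prescaler arithmetic under those assumptions:

#include <stdint.h>
#include <stdio.h>

/* Given the CAN controller clock (Hz) and a requested time quantum tq
 * (nanoseconds), compute the bit-rate prescaler roughly the way the
 * dev.c snippet suggests: brp ~= clock_freq * tq / 1e9, rounded. */
static uint32_t can_brp_from_tq(uint32_t clock_freq, uint32_t tq_ns)
{
    uint64_t brp64 = (uint64_t)clock_freq * (uint64_t)tq_ns;

    return (uint32_t)((brp64 + 500000000ULL) / 1000000000ULL);
}

int main(void)
{
    /* Example: 40 MHz controller clock, 125 ns per time quantum. */
    uint32_t brp = can_brp_from_tq(40000000, 125);

    printf("prescaler = %u\n", (unsigned)brp);   /* 40e6 * 125e-9 = 5 */
    return 0;
}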
/drivers/tty/ipwireless/hardware.c
   1728  struct ipw_tx_packet *tp, *tq;  (local)
   1739  list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) {
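Here tq is simply the scratch cursor of list_for_each_entry_safe(): it holds the next element so the current packet can be unlinked and freed while the queue is walked. The same idea with a plain singly linked list; tx_packet and flush_tx_queue are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Toy queued TX packet, standing in for struct ipw_tx_packet. */
struct tx_packet {
    int id;
    struct tx_packet *next;
};

/* Drain and free the whole queue.  'tq' plays the same role as the
 * second argument of list_for_each_entry_safe(): it remembers the
 * successor so 'tp' can be freed while iterating. */
static void flush_tx_queue(struct tx_packet **head)
{
    struct tx_packet *tp, *tq;

    for (tp = *head; tp; tp = tq) {
        tq = tp->next;      /* save the successor before freeing */
        printf("dropping packet %d\n", tp->id);
        free(tp);
    }
    *head = NULL;
}

int main(void)
{
    struct tx_packet *head = NULL;

    for (int i = 0; i < 3; i++) {
        struct tx_packet *p = malloc(sizeof(*p));
        if (!p)
            break;
        p->id = i;
        p->next = head;
        head = p;
    }
    flush_tx_queue(&head);
    return 0;
}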

Completed in 288 milliseconds