Searched defs:rxq (Results 1 - 25 of 36) sorted by relevance

/drivers/net/ethernet/sfc/
vfdi.h
151 * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targetting
198 u32 rxq; member in struct:vfdi_req::__anon2918::__anon2922
/drivers/net/wireless/iwlwifi/
iwl-trans-pcie-rx.c
80 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
81 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
82 * to replenish the iwl->rxq->rx_free.
84 * iwl->rxq is replenished and the READ INDEX is updated (updating the
87 * detached from the iwl->rxq. The driver 'processed' index is updated.
88 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
89 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
204 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
209 spin_lock_irqsave(&rxq
256 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
363 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
470 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
[all...]
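
The iwl-trans-pcie-rx.c comment above describes a low-watermark scheme: buffers are handed to the device from rxq->rx_free, and once free_count drops to RX_LOW_WATERMARK a refill is scheduled. Below is a minimal standalone sketch of that pattern; it is not the iwlwifi code, and the sizes, names and the synchronous refill are illustrative only (the real driver defers the refill to scheduled work and manages DMA mappings).

    /* Hypothetical sketch of the low-watermark RX free-list pattern described
     * in the iwl-trans-pcie-rx.c comment above; not driver code. */
    #include <stdio.h>
    #include <stdlib.h>

    #define RX_QUEUE_SIZE     32   /* illustrative pool size */
    #define RX_LOW_WATERMARK   8   /* illustrative refill threshold */

    struct rx_buf {
        struct rx_buf *next;
        void *data;
    };

    static struct rx_buf *rx_free;  /* buffers ready to hand to the device */
    static int free_count;

    static void rx_replenish(void)
    {
        /* Refill the free list back up to RX_QUEUE_SIZE entries. */
        while (free_count < RX_QUEUE_SIZE) {
            struct rx_buf *buf = malloc(sizeof(*buf));
            if (!buf)
                break;
            buf->data = malloc(4096);
            if (!buf->data) {
                free(buf);
                break;
            }
            buf->next = rx_free;
            rx_free = buf;
            free_count++;
        }
    }

    static struct rx_buf *rx_take_buffer(void)
    {
        struct rx_buf *buf = rx_free;

        if (!buf)
            return NULL;
        rx_free = buf->next;
        free_count--;

        /* When the pool runs low, refill it (the driver schedules work here). */
        if (free_count <= RX_LOW_WATERMARK)
            rx_replenish();
        return buf;
    }

    int main(void)
    {
        rx_replenish();
        for (int i = 0; i < 100; i++) {
            struct rx_buf *buf = rx_take_buffer();
            if (!buf)
                break;
            free(buf->data);            /* pretend the packet was consumed */
            free(buf);
        }
        printf("free_count = %d\n", free_count);
        while (rx_free) {               /* drain the pool before exit */
            struct rx_buf *buf = rx_free;
            rx_free = buf->next;
            free(buf->data);
            free(buf);
        }
        return 0;
    }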
iwl-trans-pcie-int.h
231 * @rxq: all the RX queue data
254 struct iwl_rx_queue rxq; member in struct:iwl_trans_pcie
iwl-trans-pcie.c
89 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
92 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
94 spin_lock_init(&rxq->lock);
96 if (WARN_ON(rxq->bd || rxq->rb_stts))
100 rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
101 &rxq->bd_dma, GFP_KERNEL);
102 if (!rxq
126 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
145 iwl_trans_rx_hw_init(struct iwl_trans *trans, struct iwl_rx_queue *rxq) argument
196 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
239 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
1982 struct iwl_rx_queue *rxq = &trans_pcie->rxq; local
[all...]
/drivers/infiniband/hw/cxgb3/
iwch_cm.c
107 static struct sk_buff_head rxq; variable in typeref:struct:sk_buff_head
2169 while ((skb = skb_dequeue(&rxq))) {
2200 skb_queue_tail(&rxq, skb);
2241 skb_queue_head_init(&rxq);
/drivers/net/
macvtap.c
163 * Select a queue based on the rxq of the device on which this packet
175 __u32 rxq; local
181 rxq = skb_get_rxhash(skb);
182 if (rxq) {
183 tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
189 rxq = skb_get_rx_queue(skb);
191 while (unlikely(rxq >= numvtaps))
192 rxq -= numvtaps;
194 tap = rcu_dereference(vlan->taps[rxq]);
200 for (rxq
[all...]
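
The macvtap.c snippet above picks a receive tap from either the packet's flow hash or, failing that, the RX queue the packet was recorded on, folded into the number of taps. A small self-contained sketch of that selection logic follows, with made-up names (select_queue(), flow_hash and recorded_rxq are not the macvtap API):

    /* Illustrative queue selection, mirroring the macvtap.c snippet above. */
    #include <stdio.h>

    static unsigned int select_queue(unsigned int flow_hash,
                                     unsigned int recorded_rxq,
                                     unsigned int numvtaps)
    {
        unsigned int rxq = flow_hash;

        if (rxq)
            return rxq % numvtaps;      /* spread flows across the taps */

        /* No hash available: reuse the recorded RX queue, wrapped into range. */
        rxq = recorded_rxq;
        while (rxq >= numvtaps)
            rxq -= numvtaps;
        return rxq;
    }

    int main(void)
    {
        printf("%u\n", select_queue(0x12345678u, 0, 4));  /* hash-based -> 0 */
        printf("%u\n", select_queue(0, 6, 4));            /* rxq-based  -> 2 */
        return 0;
    }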
xen-netfront.c
862 struct sk_buff_head *rxq)
869 while ((skb = __skb_dequeue(rxq)) != NULL) {
912 struct sk_buff_head rxq; local
921 skb_queue_head_init(&rxq);
1010 __skb_queue_tail(&rxq, skb);
1018 work_done -= handle_incoming_queue(dev, &rxq);
861 handle_incoming_queue(struct net_device *dev, struct sk_buff_head *rxq) argument
/drivers/net/wireless/iwlegacy/
debug.c
899 struct il_rx_queue *rxq = &il->rxq; local
904 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
905 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
908 rxq->free_count);
909 if (rxq->rb_stts) {
912 le16_to_cpu(rxq->rb_stts->
3945.c
769 il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq) argument
771 il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
772 il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
948 struct il_rx_queue *rxq = &il->rxq; local
958 if (!rxq->bd) {
965 il3945_rx_queue_reset(il, rxq);
969 il3945_rx_init(il, rxq);
972 rxq->need_update = 1;
973 il_rx_queue_update_write_ptr(il, rxq);
[all...]
3945-mac.c
893 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
894 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
895 * to replenish the iwl->rxq->rx_free.
897 * iwl->rxq is replenished and the READ IDX is updated (updating the
900 * detached from the iwl->rxq. The driver 'processed' idx is updated.
901 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
902 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
949 struct il_rx_queue *rxq = &il->rxq; local
955 spin_lock_irqsave(&rxq
998 struct il_rx_queue *rxq = &il->rxq; local
1065 il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) argument
1121 il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) argument
1192 struct il_rx_queue *rxq = &il->rxq; local
[all...]
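
The 3945-mac.c entry repeats the same free-list description for the legacy driver and refers to the READ and 'processed' indices that walk a circular RX queue. As a rough sketch of the ring-index arithmetic such a queue relies on (assuming a power-of-two size; names and sizes are illustrative, not the iwlegacy code):

    /* Hypothetical circular RX queue index handling; not iwlegacy code. */
    #include <stdio.h>

    #define RX_QUEUE_SIZE 256            /* power of two, so a mask works */

    struct rx_queue {
        unsigned int write;              /* last slot handed to the device */
        unsigned int processed;          /* last slot consumed by the driver */
    };

    static unsigned int rxq_next(unsigned int idx)
    {
        return (idx + 1) & (RX_QUEUE_SIZE - 1);   /* wrap around the ring */
    }

    static unsigned int rxq_pending(const struct rx_queue *q)
    {
        /* Filled slots waiting to be processed. */
        return (q->write - q->processed) & (RX_QUEUE_SIZE - 1);
    }

    int main(void)
    {
        struct rx_queue q = { 0, 0 };

        for (int i = 0; i < 300; i++)    /* device fills 300 slots */
            q.write = rxq_next(q.write);
        for (int i = 0; i < 290; i++)    /* driver consumes 290 of them */
            q.processed = rxq_next(q.processed);

        printf("pending = %u\n", rxq_pending(&q));   /* prints 10 */
        return 0;
    }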
/drivers/atm/
ambassador.c
688 amb_rxq * rxq = &dev->rxq[pool]; local
693 spin_lock_irqsave (&rxq->lock, flags);
695 if (rxq->pending < rxq->maximum) {
696 PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);
698 *rxq->in.ptr = *rx;
699 rxq->pending++;
700 rxq->in.ptr = NEXTQ (rxq
713 amb_rxq * rxq = &dev->rxq[pool]; local
746 amb_rxq * rxq = &dev->rxq[pool]; local
784 amb_rxq * rxq; local
1590 amb_rxq * rxq = &dev->rxq[pool]; local
[all...]
fore200e.c
1181 struct host_rxq* rxq = &fore200e->host_rxq; local
1188 entry = &rxq->host_entry[ rxq->head ];
1219 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
2215 struct host_rxq* rxq = &fore200e->host_rxq; local
2223 &rxq->status,
2232 &rxq->rpd,
2237 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2247 rxq->host_entry[ i ].status =
2248 FORE200E_INDEX(rxq
[all...]
ambassador.h
635 amb_rxq rxq[NUM_RX_POOLS]; member in struct:amb_dev
/drivers/infiniband/hw/cxgb4/
cm.c
127 static struct sk_buff_head rxq; variable in typeref:struct:sk_buff_head
2566 while ((skb = skb_dequeue(&rxq))) {
2605 skb_queue_tail(&rxq, skb);
2708 skb_queue_head_init(&rxq);
/drivers/net/ethernet/chelsio/cxgb4/
sge.c
1514 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, argument
1520 skb = napi_get_frags(&rxq->rspq.napi);
1523 rxq->stats.rx_drops++;
1532 skb_record_rx_queue(skb, rxq->rspq.idx);
1533 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1538 rxq->stats.vlan_ex++;
1540 ret = napi_gro_frags(&rxq->rspq.napi);
1542 rxq->stats.lro_pkts++;
1544 rxq->stats.lro_merged++;
1545 rxq
1563 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); local
1693 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); local
1895 struct sge_eth_rxq *rxq; local
[all...]
t4_hw.c
2216 * @rxq: the max number of interruptless ingress queues
2230 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2242 FW_PFVF_CMD_NIQ(rxq));
2228 t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi, unsigned int cmask, unsigned int pmask, unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) argument
/drivers/net/ethernet/chelsio/cxgb4vf/
cxgb4vf_main.c
286 int rxq, msi, err; local
300 for_each_ethrxq(s, rxq) {
304 &s->ethrxq[rxq].rspq);
312 while (--rxq >= 0)
313 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
324 int rxq, msi; local
328 for_each_ethrxq(s, rxq)
330 &s->ethrxq[rxq].rspq);
355 int rxq; local
358 for_each_ethrxq(s, rxq)
380 int rxq; local
521 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; local
552 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; local
594 struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; local
1156 struct sge_eth_rxq *rxq; local
1455 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; local
1618 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; local
1775 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; local
2329 struct sge_eth_rxq *rxq = &s->ethrxq[qs]; local
[all...]
sge.c
1454 * @rxq: ingress RX Ethernet Queue
1461 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, argument
1467 skb = napi_get_frags(&rxq->rspq.napi);
1470 rxq->stats.rx_drops++;
1479 skb_record_rx_queue(skb, rxq->rspq.idx);
1483 ret = napi_gro_frags(&rxq->rspq.napi);
1486 rxq->stats.lro_pkts++;
1488 rxq->stats.lro_merged++;
1489 rxq->stats.pkts++;
1490 rxq
1507 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); local
1639 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); local
1956 struct sge_eth_rxq *rxq; local
2346 struct sge_eth_rxq *rxq = s->ethrxq; local
[all...]
/drivers/net/ethernet/emulex/benet/
be_cmds.c
970 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
975 struct be_dma_mem *q_mem = &rxq->dma_mem;
1001 rxq->id = le16_to_cpu(resp->id);
1002 rxq->created = true;
969 be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq, u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id) argument
be_main.c
1110 struct be_queue_info *rxq = &rxo->q; local
1122 atomic_dec(&rxq->used);
1130 struct be_queue_info *rxq = &rxo->q; local
1138 index_inc(&rxcp->rxq_idx, rxq->len);
1149 struct be_queue_info *rxq = &rxo->q; local
1189 index_inc(&rxcp->rxq_idx, rxq->len);
1213 index_inc(&rxcp->rxq_idx, rxq->len);
1259 struct be_queue_info *rxq = &rxo->q; local
1289 index_inc(&rxcp->rxq_idx, rxq->len);
1428 struct be_queue_info *rxq local
1577 struct be_queue_info *rxq = &rxo->q; local
[all...]
/drivers/net/wireless/ath/ath6kl/
htc.c
1887 struct list_head *rxq,
1896 n_scat_pkt = get_queue_depth(rxq);
1899 if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
1912 __func__, get_queue_depth(rxq), n_scat_pkt);
1919 get_queue_depth(rxq), n_scat_pkt);
1929 packet = list_first_entry(rxq, struct htc_packet, list);
1936 list_add(&packet->list, rxq);
1886 ath6kl_htc_rx_bundle(struct htc_target *target, struct list_head *rxq, struct list_head *sync_compq, int *n_pkt_fetched, bool part_bundle) argument
/drivers/net/xen-netback/
netback.c
599 struct sk_buff_head rxq; local
613 skb_queue_head_init(&rxq);
626 __skb_queue_tail(&rxq, skb);
643 while ((skb = __skb_dequeue(&rxq)) != NULL) {
/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
1793 struct bna_rxq *rxq = NULL; local
1798 rxq = (struct bna_rxq *)qe;
1799 bfa_q_qe_init(&rxq->qe);
1801 return rxq;
1805 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) argument
1807 bfa_q_qe_init(&rxq->qe);
1808 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1892 rxp->rxq.single.only = q0;
1893 rxp->rxq.single.reserved = NULL;
1896 rxp->rxq
1909 bna_rxq_qpt_setup(struct bna_rxq *rxq, struct bna_rxp *rxp, u32 page_count, u32 page_size, struct bna_mem_descr *qpt_mem, struct bna_mem_descr *swqpt_mem, struct bna_mem_descr *page_mem) argument
[all...]
/drivers/net/ethernet/xscale/
ixp4xx_eth.c
662 qmgr_disable_irq(port->plat->rxq);
670 unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id); local
686 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
692 qmgr_enable_irq(rxq);
693 if (!qmgr_stat_below_low_watermark(rxq) &&
700 qmgr_disable_irq(rxq);
1023 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
1052 qmgr_release_queue(port->plat->rxq);
1063 qmgr_release_queue(port->plat->rxq);
[all...]
/drivers/net/wan/
ixp4xx_hss.c
657 unsigned int rxq = queue_ids[port->id].rx; local
674 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
680 qmgr_enable_irq(rxq);
681 if (!qmgr_stat_empty(rxq) &&
688 qmgr_disable_irq(rxq);

Completed in 640 milliseconds
