Searched refs:rspq (Results 1 - 22 of 22) sorted by relevance

/drivers/net/ethernet/brocade/bna/
bfa_msgq.c
319 static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
330 bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
331 bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
333 bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
334 bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
338 rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq) argument
340 rspq->producer_index = 0;
341 rspq->consumer_index = 0;
342 rspq->flags = 0;
346 rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event) argument
364 rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq) argument
370 rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) argument
388 rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq) argument
393 rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event) argument
411 rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq) argument
418 rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) argument
446 struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg; local
451 bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq) argument
468 bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb) argument
499 bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq) argument
648 struct bfa_msgq_rspq *rspq = &msgq->rspq; local
[all...]
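
The bfa_msgq.c hits are the driver's response-queue state machine: bfa_fsm_state_decl() emits a forward declaration for each state's event handler plus its _entry hook, and the stopped-entry handler clears producer_index, consumer_index and flags. A rough, self-contained sketch of that declaration pattern follows; the macro body and the single state shown are reconstructions for illustration, not the driver's exact code.

    #include <stdio.h>

    enum rspq_event { RSPQ_E_START, RSPQ_E_STOP };
    struct msgq_rspq { int producer_index, consumer_index, flags; };

    /* Hypothetical reconstruction of the bfa_fsm_state_decl() idea:
     * one event handler plus one _entry hook per declared state. */
    #define fsm_state_decl(oc, st, otype, etype)            \
        static void oc##_sm_##st(otype *q, etype event);    \
        static void oc##_sm_##st##_entry(otype *q)

    fsm_state_decl(rspq, stopped, struct msgq_rspq, enum rspq_event);

    static void rspq_sm_stopped_entry(struct msgq_rspq *q)
    {
        /* as in the stopped-entry hit: indexes and flags are reset */
        q->producer_index = 0;
        q->consumer_index = 0;
        q->flags = 0;
    }

    static void rspq_sm_stopped(struct msgq_rspq *q, enum rspq_event event)
    {
        if (event == RSPQ_E_START)
            printf("leaving stopped, ci=%d\n", q->consumer_index);
    }

    int main(void)
    {
        struct msgq_rspq q = { 5, 3, 1 };

        rspq_sm_stopped_entry(&q);
        rspq_sm_stopped(&q, RSPQ_E_START);
        return 0;
    }
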
bfa_msgq.h
112 struct bfa_msgq_rspq rspq; member in struct:bfa_msgq
bfi.h
446 struct bfi_msgq rspq; member in struct:bfi_msgq_cfg_req
/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
1467 skb = napi_get_frags(&rxq->rspq.napi);
1479 skb_record_rx_queue(skb, rxq->rspq.idx);
1483 ret = napi_gro_frags(&rxq->rspq.napi);
1495 * @rspq: the response queue that received the packet
1501 int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, argument
1507 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1514 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1530 skb->protocol = eth_type_trans(skb, rspq->netdev);
1531 skb_record_rx_queue(skb, rspq
1565 is_new_response(const struct rsp_ctrl *rc, const struct sge_rspq *rspq) argument
1614 rspq_next(struct sge_rspq *rspq) argument
1637 process_responses(struct sge_rspq *rspq, int budget) argument
1769 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); local
1796 struct sge_rspq *rspq = cookie; local
1816 struct sge_rspq *rspq; local
2034 t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, bool iqasynch, struct net_device *dev, int intr_dest, struct sge_fl *fl, rspq_handler_t hnd) argument
2310 free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, struct sge_fl *fl) argument
[all...]
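
t4vf_ethrx_handler() only gets the embedded struct sge_rspq pointer back from the SGE and recovers the surrounding per-queue state with container_of, as the sge.c hit above shows. A minimal userspace sketch of that recovery, using the classic form of the macro and hypothetical simplified types:

    #include <stddef.h>
    #include <stdio.h>

    /* same trick as the kernel macro: subtract the member's offset */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sge_rspq { unsigned int idx; };                    /* simplified */
    struct sge_eth_rxq { unsigned long rx_pkts; struct sge_rspq rspq; };

    int main(void)
    {
        struct sge_eth_rxq rxq = { .rx_pkts = 0, .rspq.idx = 3 };
        struct sge_rspq *q = &rxq.rspq;        /* what the handler is given */
        struct sge_eth_rxq *owner = container_of(q, struct sge_eth_rxq, rspq);

        owner->rx_pkts++;
        printf("queue idx=%u pkts=%lu same=%d\n",
               owner->rspq.idx, owner->rx_pkts, owner == &rxq);
        return 0;
    }
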
cxgb4vf_main.c
304 &s->ethrxq[rxq].rspq);
313 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
330 &s->ethrxq[rxq].rspq);
336 static void qenable(struct sge_rspq *rspq) argument
338 napi_enable(&rspq->napi);
344 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
346 SEINTARM(rspq->intr_params) |
347 INGRESSQID(rspq->cntxt_id));
359 qenable(&s->ethrxq[rxq].rspq);
383 napi_disable(&s->ethrxq[rxq].rspq
390 fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, const struct pkt_gl *gl) argument
969 qtimer_val(const struct adapter *adapter, const struct sge_rspq *rspq) argument
990 set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, unsigned int us, unsigned int cnt) argument
1307 const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; local
2250 init_rspq(struct sge_rspq *rspq, u8 timer_idx, u8 pkt_cnt_idx, unsigned int size, unsigned int iqe_size) argument
[all...]
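
The request/free pairs in cxgb4vf_main.c, and the --msi in the error path, are the usual unwind idiom: request one MSI-X vector per response queue and, if a request fails part-way, release the vectors already taken in reverse order. A generic sketch of that idiom with the IRQ calls replaced by stand-ins (the failure at vector 2 is contrived):

    #include <stdio.h>

    #define NQUEUES 4

    /* stand-ins for request_irq()/free_irq(); vector 2 is made to fail */
    static int fake_request_irq(int vec) { return vec == 2 ? -1 : 0; }
    static void fake_free_irq(int vec)   { printf("freed vec %d\n", vec); }

    static int request_queue_irqs(void)
    {
        int msi = 0, qidx;

        for (qidx = 0; qidx < NQUEUES; qidx++) {
            if (fake_request_irq(msi))
                goto err;
            msi++;
        }
        return 0;

    err:
        /* unwind: free only the vectors that were actually requested */
        while (msi)
            fake_free_irq(--msi);
        return -1;
    }

    int main(void)
    {
        printf("result=%d\n", request_queue_irqs());
        return 0;
    }
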
adapter.h
204 struct sge_rspq rspq; /* Response Queue */ member in struct:sge_eth_rxq
t4vf_hw.c
759 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
760 * @nrspq: number of values in @rspq
766 * The caller must ensure the values in @rspq are in the range 0..1023.
769 int start, int n, const u16 *rspq, int nrspq)
771 const u16 *rsp = rspq;
772 const u16 *rsp_end = rspq+nrspq;
831 rsp = rspq;
768 t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, int start, int n, const u16 *rspq, int nrspq) argument
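
t4vf_config_rss_range() fills n consecutive slots of the VF's RSS indirection table from the caller's rspq[] array, restarting at rspq[0] whenever the walking pointer reaches rsp_end, as the rsp/rsp_end hits above show. The cycling on its own, as a stand-alone sketch (the hardware write is omitted):

    #include <stdio.h>

    /* Fill n indirection-table slots by cycling through the nrspq
     * queue ids supplied by the caller. */
    static void fill_rss_table(unsigned short *table, int n,
                               const unsigned short *rspq, int nrspq)
    {
        const unsigned short *rsp = rspq;
        const unsigned short *rsp_end = rspq + nrspq;
        int i;

        for (i = 0; i < n; i++) {
            table[i] = *rsp++;
            if (rsp == rsp_end)        /* wrap back to the first queue id */
                rsp = rspq;
        }
    }

    int main(void)
    {
        unsigned short qids[3] = { 4, 5, 6 };
        unsigned short table[8];
        int i;

        fill_rss_table(table, 8, qids, 3);
        for (i = 0; i < 8; i++)
            printf("%u ", table[i]);
        printf("\n");
        return 0;
    }
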
/drivers/scsi/bfa/
bfa_hw_cb.c
53 bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci) argument
55 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
58 if (bfa_rspq_ci(bfa, rspq) == ci)
61 bfa_rspq_ci(bfa, rspq) = ci;
62 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
67 bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) argument
69 if (bfa_rspq_ci(bfa, rspq) == ci)
72 bfa_rspq_ci(bfa, rspq) = ci;
73 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
bfa_hw_ct.c
74 bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) argument
78 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
79 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
81 bfa_rspq_ci(bfa, rspq) = ci;
82 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
92 bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) argument
94 bfa_rspq_ci(bfa, rspq) = ci;
95 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
bfa.h
187 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
321 void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
333 void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
334 void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
335 void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
bfi.h
553 struct bfi_msgq_s rspq; member in struct:bfi_msgq_cfg_req_s
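
bfa.h exposes the ack as a function pointer, hw_rspq_ack, with per-ASIC implementations (bfa_hwcb_*, bfa_hwct_*, bfa_hwct2_*), so the common code calls one hook and the attach path decides which chip-specific routine backs it. A toy sketch of that dispatch; the structure layout and names below are made up for illustration:

    #include <stdio.h>

    struct bfa;                                    /* opaque for the sketch */
    struct bfa_hwif {
        void (*hw_rspq_ack)(struct bfa *bfa, int rspq, unsigned int ci);
    };
    struct bfa { struct bfa_hwif hwif; };

    static void hwcb_rspq_ack(struct bfa *bfa, int rspq, unsigned int ci)
    {
        printf("CB-style ack: q=%d ci=%u\n", rspq, ci);
    }

    static void hwct_rspq_ack(struct bfa *bfa, int rspq, unsigned int ci)
    {
        printf("CT-style ack: q=%d ci=%u\n", rspq, ci);
    }

    int main(void)
    {
        struct bfa bfa;
        int is_ct_asic = 1;                        /* pretend chip detection */

        /* attach-time selection of the chip-specific ack routine */
        bfa.hwif.hw_rspq_ack = is_ct_asic ? hwct_rspq_ack : hwcb_rspq_ack;

        bfa.hwif.hw_rspq_ack(&bfa, 0, 42);         /* callers stay generic */
        return 0;
    }
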
/drivers/net/ethernet/chelsio/cxgb3/
sge.c
174 return container_of(q, struct sge_qset, rspq);
652 memset(&q->rspq, 0, sizeof(q->rspq));
707 if (q->rspq.desc) {
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
712 q->rspq.size * sizeof(struct rsp_desc),
713 q->rspq.desc, q->rspq.phys_addr);
728 qs->rspq.cntxt_id = id;
1833 struct sge_rspq *q = &qs->rspq;
[all...]
adapter.h
203 struct sge_rspq rspq; member in struct:sge_qset
common.h
319 unsigned int polling; /* polling/interrupt service for rspq */
701 const u8 * cpus, const u16 *rspq);
750 unsigned int size, int rspq, int ovfl_mode,
t3_cpl.h
1441 __u8 rspq:3; member in struct:cpl_rdma_terminate
1445 __u8 rspq:3; member in struct:cpl_rdma_terminate
cxgb3_main.c
408 rspq.polling), 0,
444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
457 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1166 struct sge_rspq *q = &adap->sge.qs[i].rspq;
1271 adap->sge.qs[0].rspq.
2597 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
t3_hw.c
2320 * @rspq: response queue for async notifications
2330 unsigned int size, int rspq, int ovfl_mode,
2343 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2493 * @rspq: values for the response queue lookup table (0xffff terminated)
2495 * Programs the receive packet steering logic. @cpus and @rspq provide
2501 const u8 * cpus, const u16 *rspq)
2517 if (rspq)
2520 (i << 16) | rspq[q_idx++]);
2521 if (rspq[q_idx] == 0xffff)
2329 t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, unsigned int size, int rspq, int ovfl_mode, unsigned int credits, unsigned int credit_thres) argument
2500 t3_config_rss(struct adapter *adapter, unsigned int rss_config, const u8 * cpus, const u16 *rspq) argument
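
t3_config_rss() in cxgb3 takes a 0xffff-terminated rspq[] map and writes one (index << 16) | queue word per table slot, wrapping q_idx back to 0 when the next entry is the terminator, as the last hits above show. The wrap logic on its own (the register write is replaced by a printf, and the table is shortened):

    #include <stdio.h>

    #define RSS_TABLE_SIZE 8    /* the real table is larger; shortened here */

    /* rspq[] is terminated by 0xffff, as in the cxgb3 interface */
    static void program_rss_map(const unsigned short *rspq)
    {
        unsigned int i, q_idx = 0;

        for (i = 0; i < RSS_TABLE_SIZE; i++) {
            /* the driver writes this word to the RSS map register */
            unsigned int word = (i << 16) | rspq[q_idx++];

            if (rspq[q_idx] == 0xffff)    /* hit the terminator: restart */
                q_idx = 0;
            printf("slot %u -> 0x%08x\n", i, word);
        }
    }

    int main(void)
    {
        static const unsigned short rspq[] = { 0, 1, 2, 0xffff };

        program_rss_map(rspq);
        return 0;
    }
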
/drivers/staging/slicoss/
slicoss.c
1403 struct slic_rspqueue *rspq = &adapter->rspqueue; local
1405 for (i = 0; i < rspq->num_pages; i++) {
1406 if (rspq->vaddr[i]) {
1408 rspq->vaddr[i], rspq->paddr[i]);
1410 rspq->vaddr[i] = NULL;
1411 rspq->paddr[i] = 0;
1413 rspq->offset = 0;
1414 rspq->pageindex = 0;
1415 rspq
1421 struct slic_rspqueue *rspq = &adapter->rspqueue; local
1471 struct slic_rspqueue *rspq = &adapter->rspqueue; local
[all...]
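
The slicoss hits are the response-queue teardown: walk every page of the queue, free the buffer behind each vaddr[i]/paddr[i] pair, and reset offset and pageindex. A sketch of that walk with plain malloc/free standing in for the PCI-consistent allocations; the page count is a made-up value:

    #include <stdio.h>
    #include <stdlib.h>

    #define SLIC_RSPQ_PAGES 4    /* hypothetical page count for the sketch */

    struct slic_rspqueue {
        int num_pages;
        void *vaddr[SLIC_RSPQ_PAGES];
        unsigned long paddr[SLIC_RSPQ_PAGES];
        unsigned int offset;
        unsigned int pageindex;
    };

    static void rspqueue_free(struct slic_rspqueue *rspq)
    {
        int i;

        for (i = 0; i < rspq->num_pages; i++) {
            if (rspq->vaddr[i]) {
                free(rspq->vaddr[i]);    /* pci_free_consistent() in the driver */
                rspq->vaddr[i] = NULL;
                rspq->paddr[i] = 0;
            }
        }
        rspq->offset = 0;
        rspq->pageindex = 0;
    }

    int main(void)
    {
        struct slic_rspqueue rspq = { .num_pages = SLIC_RSPQ_PAGES };
        int i;

        for (i = 0; i < rspq.num_pages; i++)
            rspq.vaddr[i] = malloc(4096);
        rspqueue_free(&rspq);
        printf("offset=%u pageindex=%u\n", rspq.offset, rspq.pageindex);
        return 0;
    }
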
/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_main.c
481 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
570 &s->ethrxq[ethqidx].rspq);
578 &s->ofldrxq[ofldqidx].rspq);
586 &s->rdmarxq[rdmaqidx].rspq);
596 &s->rdmarxq[rdmaqidx].rspq);
599 &s->ofldrxq[ofldqidx].rspq);
601 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
613 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
615 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
617 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
[all...]
sge.c
1520 skb = napi_get_frags(&rxq->rspq.napi);
1532 skb_record_rx_queue(skb, rxq->rspq.idx);
1533 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1540 ret = napi_gro_frags(&rxq->rspq.napi);
1563 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1693 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1904 if (napi_reschedule(&rxq->rspq.napi))
2297 if (eq->rspq.desc)
2298 free_rspq_fl(adap, &eq->rspq, &eq->fl);
2310 if (oq->rspq
[all...]
cxgb4.h
370 struct sge_rspq rspq; member in struct:sge_eth_rxq
383 struct sge_rspq rspq; member in struct:sge_ofld_rxq
656 int start, int n, const u16 *rspq, unsigned int nrspq);
t4_hw.c
1561 * @rspq: values for the response queue lookup table
1562 * @nrspq: number of values in @rspq
1568 * The caller must ensure the values in @rspq are in the range allowed for
1572 int start, int n, const u16 *rspq, unsigned int nrspq)
1575 const u16 *rsp = rspq;
1576 const u16 *rsp_end = rspq + nrspq;
1601 rsp = rspq;
1604 rsp = rspq;
1607 rsp = rspq;
1571 t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq) argument
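
The PF version, t4_config_rss_range() in cxgb4, cycles through rspq[] the same way but packs the queue ids several per 32-bit word of the firmware RSS_IND_TBL command; the repeated rsp = rspq wrap checks in the hits correspond to the per-slot fills. A rough sketch of three-per-word packing; the 10-bit field positions are an assumption for illustration, not taken from the firmware header:

    #include <stdio.h>
    #include <stdint.h>

    /* Pack three queue ids into one 32-bit word, cycling through rspq[].
     * The 10-bit slot layout here is assumed, not the real command format. */
    static uint32_t pack_three(const uint16_t **rsp, const uint16_t *rspq,
                               const uint16_t *rsp_end)
    {
        uint32_t word = 0;
        int slot;

        for (slot = 0; slot < 3; slot++) {
            word |= (uint32_t)(**rsp & 0x3ff) << (slot * 10);
            if (++(*rsp) >= rsp_end)    /* wrap, as in the driver loop */
                *rsp = rspq;
        }
        return word;
    }

    int main(void)
    {
        static const uint16_t rspq[] = { 10, 11 };
        const uint16_t *rsp = rspq;
        int i;

        for (i = 0; i < 4; i++)
            printf("word %d = 0x%08x\n", i,
                   (unsigned)pack_three(&rsp, rspq, rspq + 2));
        return 0;
    }
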

Completed in 316 milliseconds