Searched defs:send_cq (Results 1 - 7 of 7) sorted by relevance
/drivers/infiniband/hw/amso1100/
c2_qp.c
     420  struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);    (local)
     460  wr.sq_cq_handle = send_cq->adapter_handle;
     572  static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)    (argument)
     574      if (send_cq == recv_cq)
     575          spin_lock_irq(&send_cq->lock);
     576      else if (send_cq > recv_cq) {
     577          spin_lock_irq(&send_cq->lock);
     581          spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
     585  static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)    (argument)
     600  struct c2_cq *send_cq;    (local)
     [all ...]
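The c2_lock_cqs()/c2_unlock_cqs() pair above is the classic two-lock ordering idiom: when a QP's send and receive CQs are distinct, the driver always takes the two spinlocks in one fixed order (here decided by comparing the CQ pointers themselves), so concurrent callers locking the same pair can never deadlock; spin_lock_nested(..., SINGLE_DEPTH_NESTING) tells lockdep the second acquisition of the same lock class is intentional. Below is a minimal userspace analogue of the idiom, with pthread mutexes standing in for kernel spinlocks; struct cq, cq_pair_lock() and cq_pair_unlock() are hypothetical names:

    /* Pointer-ordered pair locking, modeled on c2_lock_cqs()/c2_unlock_cqs().
     * All names here are hypothetical; this is a sketch of the idiom, not
     * driver code. */
    #include <pthread.h>
    #include <stdint.h>

    struct cq {
        pthread_mutex_t lock;
        /* ... completion-queue state ... */
    };

    static void cq_pair_lock(struct cq *send_cq, struct cq *recv_cq)
    {
        if (send_cq == recv_cq) {
            /* one shared CQ: there is only one lock to take */
            pthread_mutex_lock(&send_cq->lock);
        } else if ((uintptr_t)send_cq > (uintptr_t)recv_cq) {
            /* fixed order: higher address first, as in c2_lock_cqs() */
            pthread_mutex_lock(&send_cq->lock);
            pthread_mutex_lock(&recv_cq->lock);
        } else {
            pthread_mutex_lock(&recv_cq->lock);
            pthread_mutex_lock(&send_cq->lock);
        }
    }

    static void cq_pair_unlock(struct cq *send_cq, struct cq *recv_cq)
    {
        /* unlock order does not affect deadlock; just skip the second
         * unlock when both handles name the same CQ */
        if (send_cq != recv_cq)
            pthread_mutex_unlock(&recv_cq->lock);
        pthread_mutex_unlock(&send_cq->lock);
    }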
/drivers/infiniband/hw/ehca/ |
ehca_classes.h
     207  struct ehca_cq *send_cq;    (member in struct ehca_qp)
/drivers/infiniband/hw/mlx4/ |
qp.c
     643  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
     644      __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
     646      if (send_cq == recv_cq) {
     647          spin_lock_irq(&send_cq->lock);
     649      } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
     650          spin_lock_irq(&send_cq->lock);
     654          spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
     658  static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
     659      __releases(&send_cq->lock) __releases(&recv_cq->lock)
     661      if (send_cq == recv_cq) {
     691  get_cqs(struct mlx4_ib_qp *qp, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)    (argument)
     713  struct mlx4_ib_cq *send_cq, *recv_cq;    (local)
    1013  struct mlx4_ib_cq *send_cq, *recv_cq;    (local)
     [all ...]
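mlx4 applies the same pairing idiom but keys the lock order on the stable hardware CQ number (mcq.cqn) rather than on pointer values, and the __acquires()/__releases() markers are sparse annotations that let static analysis check the lock balance. The search excerpt truncates mlx4_ib_unlock_cqs() at line 661; mirroring the lock side shown above, the body plausibly reads like the following reconstruction (a sketch, not verbatim driver code):

    /* Within drivers/infiniband/hw/mlx4/qp.c, so "mlx4_ib.h" and
     * <linux/spinlock.h> are in scope. Reconstruction of the truncated
     * unlock path, mirroring mlx4_ib_lock_cqs() above. */
    static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
                                   struct mlx4_ib_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
    {
        if (send_cq == recv_cq) {
            __release(&recv_cq->lock);      /* balance sparse: one real lock */
            spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
            /* send_cq was taken first, so it is released last,
             * restoring interrupts only on the outermost unlock */
            spin_unlock(&recv_cq->lock);
            spin_unlock_irq(&send_cq->lock);
        } else {
            spin_unlock(&send_cq->lock);
            spin_unlock_irq(&recv_cq->lock);
        }
    }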
/drivers/infiniband/hw/mthca/ |
mthca_qp.c
     717  qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
     819  if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
     820      mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
    1140  struct mthca_cq *send_cq,
    1268  struct mthca_cq *send_cq,
    1295  err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
    1310  static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
    1311      __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
    1313      if (send_cq == recv_cq) {
    1314          spin_lock_irq(&send_cq->lock);
    1138  mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp)    (argument)
    1266  mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp)    (argument)
    1340  mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, int port, struct mthca_sqp *sqp)    (argument)
    1421  struct mthca_cq *send_cq;    (local)
     [all ...]
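Lines 819-820 show where the send/receive split matters outside of locking: when a QP is reset or torn down, its stale completions must be scrubbed from both CQs, but only once if send_cq and recv_cq name the same CQ. A sketch of that pattern, using the mthca_cq_clean(dev, cq, qpn, srq) call visible in the excerpt; the helper name qp_scrub_cqs() and the SRQ handling on the receive side are assumptions:

    /* Within drivers/infiniband/hw/mthca, with "mthca_dev.h" and the
     * to_mcq()/to_msrq() converters in scope. Hypothetical helper. */
    static void qp_scrub_cqs(struct mthca_dev *dev, struct mthca_qp *qp)
    {
        /* receive side: completions may reference an SRQ (assumption) */
        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
        /* distinct send CQ: scrub it too, never with an SRQ */
        if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
            mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
    }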
/drivers/infiniband/ulp/ipoib/ |
ipoib.h
     302  struct ib_cq *send_cq;    (member in struct ipoib_dev_priv)
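ehca, IPoIB, and ehea (below) all record the same kind of back-pointer: the driver's or ULP's private structure remembers which CQ carries its send completions. On the verbs side the association is fixed once, at QP creation time, through ib_qp_init_attr. A minimal sketch of how a consumer wires a send CQ into a UD QP; the function name and queue sizes are placeholders:

    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: bind previously created CQs to a new UD QP. */
    static struct ib_qp *create_ud_qp(struct ib_pd *pd,
                                      struct ib_cq *send_cq,
                                      struct ib_cq *recv_cq)
    {
        struct ib_qp_init_attr init_attr = {
            .send_cq     = send_cq,    /* completions for posted sends */
            .recv_cq     = recv_cq,    /* may legitimately equal send_cq */
            .sq_sig_type = IB_SIGNAL_ALL_WR,
            .qp_type     = IB_QPT_UD,
            .cap = {
                .max_send_wr  = 64,    /* placeholder sizes */
                .max_recv_wr  = 64,
                .max_send_sge = 1,
                .max_recv_sge = 1,
            },
        };

        return ib_create_qp(pd, &init_attr);
    }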
/drivers/net/ethernet/ibm/ehea/ |
ehea.h
     365  struct ehea_cq *send_cq;    (member in struct ehea_port_res)
ehea_main.c
     197  arr[i++].fwh = pr->send_cq->fw_handle;
     808  struct ehea_cq *send_cq = pr->send_cq;    (local)
     817  cqe = ehea_poll_cq(send_cq);
     819  ehea_inc_cq(send_cq);
     859  cqe = ehea_poll_cq(send_cq);
     862  ehea_update_feca(send_cq, cqe_counter);
     897  ehea_reset_cq_ep(pr->send_cq);
     899  ehea_reset_cq_n1(pr->send_cq);
     902  cqe_skb = ehea_poll_cq(pr->send_cq);
     [all ...]
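The ehea_main.c hits trace the life of a send CQ in that driver: its firmware handle is registered (line 197), completions are reaped in a poll loop (lines 817-862), and the CQ is re-armed after a reset (lines 897-902). The loop shape implied by the excerpt is sketched below; the function name, the budget argument, and the exact division of work between ehea_poll_cq() and ehea_inc_cq() are assumptions:

    /* Within drivers/net/ethernet/ibm/ehea, with "ehea.h" and the CQ
     * helpers from "ehea_qmr.h" in scope. Sketch, not verbatim code. */
    static int reap_send_completions(struct ehea_port_res *pr, int budget)
    {
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int reaped = 0;

        cqe = ehea_poll_cq(send_cq);        /* peek at the next CQE */
        while (cqe && reaped < budget) {
            ehea_inc_cq(send_cq);           /* consume it */
            reaped++;
            /* ... free the skb of the completed transmit here ... */
            cqe = ehea_poll_cq(send_cq);
        }

        /* report how many entries we freed back to the firmware */
        ehea_update_feca(send_cq, reaped);
        return reaped;
    }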
Completed in 73 milliseconds