Searched defs:qp (Results 1 - 25 of 95) sorted by relevance

/drivers/infiniband/hw/amso1100/
c2_vq.h:47 struct c2_qp *qp; member in struct:c2_vq_req
c2_cm.c:45 struct c2_qp *qp; local
53 qp = to_c2qp(ibqp);
56 cm_id->provider_data = qp;
58 qp->cm_id = cm_id;
70 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
92 wr->qp_handle = qp->adapter_handle;
125 qp->cm_id = NULL;
286 struct c2_qp *qp; local
296 qp = to_c2qp(ibqp);
299 err = c2_qp_set_read_limits(c2dev, qp, iw_para
[all...]
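
The accept path above wires the cm_id and QP together (cm_id->provider_data = qp, qp->cm_id = cm_id) and then programs the connection's read limits from iw_param->ord / iw_param->ird. For orientation, a minimal userspace sketch of the same idea through libibverbs — a sketch, not the amso1100 code; real applications set these attributes as part of the RTR/RTS state transitions:

    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Sketch only: ORD (RDMA reads we initiate) maps to max_rd_atomic,
     * IRD (RDMA reads we accept) to max_dest_rd_atomic. */
    static int set_read_limits(struct ibv_qp *qp, uint8_t ord, uint8_t ird)
    {
        struct ibv_qp_attr attr = {
            .max_rd_atomic      = ord,
            .max_dest_rd_atomic = ird,
        };

        return ibv_modify_qp(qp, &attr,
                             IBV_QP_MAX_QP_RD_ATOMIC | IBV_QP_MAX_DEST_RD_ATOMIC);
    }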
c2_ae.c:184 struct c2_qp *qp = (struct c2_qp *)resource_user_context; local
185 struct iw_cm_id *cm_id = qp->cm_id;
189 pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
190 qp);
203 c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
218 spin_lock_irqsave(&qp->lock, flags);
219 if (qp->cm_id) {
220 qp->cm_id->rem_ref(qp->cm_id);
221 qp
[all...]
c2_cq.c:82 void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index) argument
100 if (msg->qp_user_context == (u64) (unsigned long) qp) {
135 struct c2_qp *qp; local
144 * if the qp returned is null then this qp has already
148 while ((qp =
158 entry->qp = &qp->ibqp;
190 c2_mq_lconsume(&qp->rq_mq, 1);
192 c2_mq_lconsume(&qp
[all...]
c2_qp.c:120 void c2_set_qp_state(struct c2_qp *qp, int c2_state) argument
124 pr_debug("%s: qp[%p] state modify %s --> %s\n",
126 qp,
127 to_ib_state_str(qp->state),
129 qp->state = new_state;
134 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, argument
144 pr_debug("%s:%d qp=%p, %s --> %s\n",
146 qp,
147 to_ib_state_str(qp->state),
157 wr.qp_handle = qp
253 c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, int ord, int ird) argument
302 destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) argument
381 c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) argument
405 struct c2_qp *qp; local
413 c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd, struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) argument
598 c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) argument
760 qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) argument
792 struct c2_qp *qp = to_c2qp(ibqp); local
946 struct c2_qp *qp = to_c2qp(ibqp); local
[all...]
/drivers/infiniband/hw/ehca/
hipz_fns_core.h:61 static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) argument
64 hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
68 static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes) argument
71 hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
ehca_cq.c:55 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) argument
57 unsigned int qp_num = qp->real_qp_num;
62 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
76 struct ehca_qp *qp; local
81 qp = hlist_entry(iter, struct ehca_qp, list_entries);
82 if (qp->real_qp_num == real_qp_num) {
85 "removed qp from cq .cq_num=%x real_qp_num=%x",
94 "qp not found cq_num=%x real_qp_num=%x",
105 struct ehca_qp *qp; local
107 qp
[all...]
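
The ehca_cq.c hits above show a per-CQ hash table mapping a real QP number back to its ehca_qp so the completion path can find the owning QP quickly. A minimal userspace sketch of that pattern, with illustrative types and an assumed power-of-two bucket count (the kernel code uses hlist heads):

    #include <stddef.h>

    #define QP_HASHTAB_LEN 8            /* assumption: power-of-two bucket count */

    struct sketch_qp {
        unsigned int real_qp_num;
        struct sketch_qp *next;         /* stands in for the kernel hlist */
    };

    static struct sketch_qp *qp_hashtab[QP_HASHTAB_LEN];

    static void cq_assign_qp(struct sketch_qp *qp)
    {
        unsigned int key = qp->real_qp_num & (QP_HASHTAB_LEN - 1);

        qp->next = qp_hashtab[key];     /* add at head, like hlist_add_head() */
        qp_hashtab[key] = qp;
    }

    static struct sketch_qp *cq_find_qp(unsigned int real_qp_num)
    {
        unsigned int key = real_qp_num & (QP_HASHTAB_LEN - 1);
        struct sketch_qp *qp;

        for (qp = qp_hashtab[key]; qp; qp = qp->next)
            if (qp->real_qp_num == real_qp_num)
                return qp;
        return NULL;                    /* the "qp not found" case in the hits */
    }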
ehca_uverbs.c:198 static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, argument
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
208 ehca_err(qp->ib_qp.device,
210 ret, qp->ib_qp.qp_num);
215 case 1: /* qp rqueue_addr */
216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp
256 struct ehca_qp *qp; local
[all...]
/drivers/net/ethernet/mellanox/mlx4/
en_resources.c:36 #include <linux/mlx4/qp.h>
98 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) argument
qp.c:41 #include <linux/mlx4/qp.h>
49 struct mlx4_qp *qp; local
53 qp = __mlx4_qp_lookup(dev, qpn);
54 if (qp)
55 atomic_inc(&qp->refcount);
59 if (!qp) {
64 qp->event(qp, event_type);
66 if (atomic_dec_and_test(&qp->refcount))
67 complete(&qp
70 is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp) argument
76 __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp, int native) argument
169 mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp) argument
329 mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) argument
363 mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) argument
374 mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) argument
458 mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, struct mlx4_qp_context *context) argument
479 mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_qp_context *context, struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) argument
[all...]
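
The qp.c hits above show mlx4's event dispatch taking a reference on the looked-up QP before calling its handler, so a concurrent free must wait until the last reference drops. A userspace sketch of that pattern using C11 atomics — the kernel uses atomic_inc/atomic_dec_and_test plus the truncated complete(&qp...) call above; types here are illustrative, and the owner is assumed to hold an initial reference:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sketch_qp {
        atomic_int refcount;                   /* owner initializes this to 1 */
        atomic_bool released;                  /* stands in for the completion */
        void (*event)(struct sketch_qp *qp, int event_type);
    };

    static void qp_event_dispatch(struct sketch_qp *qp, int event_type)
    {
        atomic_fetch_add(&qp->refcount, 1);    /* like atomic_inc(&qp->refcount) */
        qp->event(qp, event_type);             /* like qp->event(qp, event_type) */
        if (atomic_fetch_sub(&qp->refcount, 1) == 1)
            atomic_store(&qp->released, true); /* last ref: let the freer proceed */
    }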
/drivers/infiniband/core/
iwcm.h:50 struct ib_qp *qp; member in struct:iwcm_id_private
iwcm.c:223 static int iwcm_modify_qp_err(struct ib_qp *qp) argument
227 if (!qp)
231 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
238 static int iwcm_modify_qp_sqd(struct ib_qp *qp) argument
242 BUG_ON(qp == NULL);
244 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
264 struct ib_qp *qp = NULL; local
277 if (cm_id_priv->qp)
278 qp = cm_id_priv->qp;
492 struct ib_qp *qp; local
549 struct ib_qp *qp; local
[all...]
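
iwcm_modify_qp_err() above is the iWARP CM's way of flushing a connection: move the QP to the error state so outstanding work requests complete with flush status. The userspace-verbs equivalent is a single ibv_modify_qp() call; a sketch, setting only qp_state under the IBV_QP_STATE mask, mirroring the kernel snippet:

    #include <errno.h>
    #include <infiniband/verbs.h>

    static int modify_qp_err(struct ibv_qp *qp)
    {
        struct ibv_qp_attr attr = { .qp_state = IBV_QPS_ERR };

        if (!qp)
            return -EINVAL;
        return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
    }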
/drivers/infiniband/hw/ipath/
ipath_keys.c:121 int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, argument
124 struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
136 struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
151 qp->ibqp.pd != mr->pd)) {
199 int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, argument
202 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
216 struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
234 qp->ibqp.pd != mr->pd)) {
ipath_uc.c:42 * @qp: a pointer to the QP
46 int ipath_make_uc_req(struct ipath_qp *qp) argument
54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
57 spin_lock_irqsave(&qp->s_lock, flags);
59 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
60 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
63 if (qp->s_last == qp->s_head)
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp
240 ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) argument
[all...]
ipath_verbs_mcast.c:51 * @qp: the QP to link
53 static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp) argument
61 mqp->qp = qp;
62 atomic_inc(&qp->refcount);
70 struct ipath_qp *qp = mqp->qp; local
73 if (atomic_dec_and_test(&qp->refcount))
74 wake_up(&qp->wait);
193 if (p->qp
238 struct ipath_qp *qp = to_iqp(ibqp); local
287 struct ipath_qp *qp = to_iqp(ibqp); local
[all...]
ipath_ruc.c:80 * @qp: the QP
87 void ipath_insert_rnr_queue(struct ipath_qp *qp) argument
89 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
94 list_add(&qp->timerwait, &dev->rnrwait);
100 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
101 qp->s_rnr_timeout -= nqp->s_rnr_timeout;
111 nqp->s_rnr_timeout -= qp->s_rnr_timeout;
112 list_add(&qp->timerwait, l);
119 * @qp: the QP
123 int ipath_init_sge(struct ipath_qp *qp, struc argument
166 ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) argument
263 struct ipath_qp *qp; local
517 want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp) argument
541 ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) argument
600 ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp, struct ipath_other_headers *ohdr, u32 bth0, u32 bth2) argument
641 struct ipath_qp *qp = (struct ipath_qp *)data; local
699 ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, enum ib_wc_status status) argument
[all...]
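
ipath_insert_rnr_queue() above maintains the RNR wait list as a delta queue: each queued QP stores its timeout relative to the entry ahead of it, so the timer tick only ever needs to decrement the head. A self-contained sketch of the insert logic, with a singly linked list standing in for the kernel's list_head:

    #include <stddef.h>

    struct sketch_qp {
        unsigned int s_rnr_timeout;     /* delta vs. the previous entry */
        struct sketch_qp *next;
    };

    static void insert_rnr_queue(struct sketch_qp **head, struct sketch_qp *qp)
    {
        struct sketch_qp **pp = head;

        /* Walk entries that expire no later than us, subtracting each
         * delta, exactly as the while loop in the hit above does. */
        while (*pp && qp->s_rnr_timeout >= (*pp)->s_rnr_timeout) {
            qp->s_rnr_timeout -= (*pp)->s_rnr_timeout;
            pp = &(*pp)->next;
        }
        /* The entry we insert in front of becomes relative to us. */
        if (*pp)
            (*pp)->s_rnr_timeout -= qp->s_rnr_timeout;
        qp->next = *pp;
        *pp = qp;
    }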
ipath_ud.c:53 struct ipath_qp *qp; local
68 qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
69 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
79 if (unlikely(qp->ibqp.qp_num &&
81 sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
107 if (qp->ibqp.srq) {
108 srq = to_isrq(qp->ibqp.srq);
114 rq = &qp->r_rq;
134 rsge.sg_list = qp
241 ipath_make_ud_req(struct ipath_qp *qp) argument
409 ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) argument
[all...]
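
The UD receive checks in the ipath_ud.c hit above include a qkey comparison: when the work request's remote qkey has its high bit set, the sender's own qkey is used instead, per the usual IB qkey convention (the kernel tests this with a signed cast). Distilled into a sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the qkey selection in the ud.c hits: a negative (high-bit)
     * qkey in the work request means "use the sending QP's qkey". */
    static bool qkey_ok(uint32_t remote_qkey, uint32_t sender_qkey,
                        uint32_t dest_qkey)
    {
        uint32_t qkey = ((int32_t)remote_qkey < 0) ? sender_qkey : remote_qkey;

        return qkey == dest_qkey;
    }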
/drivers/infiniband/hw/mthca/
mthca_mcg.c:43 __be32 qp[MTHCA_QP_PER_MGM]; member in struct:mthca_mgm
165 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) {
170 } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) {
171 mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31));
242 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31)))
244 if (!(mgm->qp[i] & cpu_to_be32(1 << 31)))
254 mgm->qp[loc] = mgm->qp[i - 1];
255 mgm->qp[i - 1] = 0;
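
The mthca_mcg.c hits show how a multicast group (MGM) entry tracks member QPs: each slot in the qp[] array holds a QPN with bit 31 set as a "slot in use" marker (stored big-endian, hence the cpu_to_be32 calls), and detach compacts the array by moving the last live entry into the hole. A host-endian sketch with an illustrative slot count:

    #include <stdint.h>

    #define QP_PER_MGM 8            /* illustrative; the real size is hardware-defined */
    #define MGM_VALID  (1u << 31)   /* bit 31 marks a slot in use */

    static uint32_t mgm_qp[QP_PER_MGM];

    static int mgm_attach(uint32_t qpn)
    {
        int i;

        for (i = 0; i < QP_PER_MGM; i++) {
            if (mgm_qp[i] == (qpn | MGM_VALID))
                return 0;                    /* already a member */
            if (!(mgm_qp[i] & MGM_VALID)) {
                mgm_qp[i] = qpn | MGM_VALID; /* claim the first free slot */
                return 0;
            }
        }
        return -1;                           /* group full */
    }

    static void mgm_detach(uint32_t qpn)
    {
        int i, loc = -1;

        for (i = 0; i < QP_PER_MGM && (mgm_qp[i] & MGM_VALID); i++)
            if (mgm_qp[i] == (qpn | MGM_VALID))
                loc = i;
        if (loc >= 0) {
            mgm_qp[loc] = mgm_qp[i - 1];     /* move last member into the hole */
            mgm_qp[i - 1] = 0;
        }
    }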
/drivers/infiniband/hw/qib/
qib_keys.c:226 int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, argument
229 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
241 struct qib_pd *pd = to_ipd(qp->ibqp.pd);
261 if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
312 int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) argument
314 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
315 struct qib_pd *pd = to_ipd(qp->ibqp.pd);
329 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
qib_uc.c:42 * @qp: a pointer to the QP
46 int qib_make_uc_req(struct qib_qp *qp) argument
54 u32 pmtu = qp->pmtu;
57 spin_lock_irqsave(&qp->s_lock, flags);
59 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
60 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
63 if (qp->s_last == qp->s_head)
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp
242 qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) argument
[all...]
qib_verbs_mcast.c:40 * @qp: the QP to link
42 static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) argument
50 mqp->qp = qp;
51 atomic_inc(&qp->refcount);
59 struct qib_qp *qp = mqp->qp; local
62 if (atomic_dec_and_test(&qp->refcount))
63 wake_up(&qp->wait);
182 if (p->qp
227 struct qib_qp *qp = to_iqp(ibqp); local
285 struct qib_qp *qp = to_iqp(ibqp); local
[all...]
qib_ruc.c:81 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) argument
89 rkt = &to_idev(qp->ibqp.device)->lk_table;
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
91 ss = &qp->r_sge;
92 ss->sg_list = qp->r_sg_list;
93 qp->r_len = 0;
101 qp->r_len += wqe->sg_list[i].length;
105 ss->total_len = qp
138 qib_get_rwqe(struct qib_qp *qp, int wr_id_only) argument
229 qib_migrate_qp(struct qib_qp *qp) argument
267 qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, struct qib_qp *qp, u32 bth0) argument
358 struct qib_qp *qp; local
678 qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, u32 bth0, u32 bth2) argument
722 struct qib_qp *qp = container_of(work, struct qib_qp, s_work); local
773 qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, enum ib_wc_status status) argument
[all...]
qib_ud.c:53 struct qib_qp *qp; local
61 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
62 if (!qp) {
66 if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
67 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
75 if (qp->ibqp.qp_num > 1) {
81 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
87 sqp->ibqp.qp_num, qp->ibqp.qp_num,
99 if (qp->ibqp.qp_num) {
104 if (unlikely(qkey != qp
232 qib_make_ud_req(struct qib_qp *qp) argument
425 qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) argument
[all...]
/drivers/infiniband/hw/cxgb3/
iwch_qp.c:528 int iwch_bind_mw(struct ib_qp *qp, argument
546 qhp = to_iwch_qp(qp);
767 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
817 /* locking hierarchy: cq lock first, then qp lock. */
831 /* locking hierarchy: cq lock first, then qp lock. */
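
The two locking-hierarchy comments above are the whole deadlock-avoidance story for cxgb3's flush paths: any code that needs both locks takes the CQ lock before the QP lock, so two threads can never hold them in opposite orders. A pthread sketch of the discipline, with illustrative types standing in for the kernel spinlocks:

    #include <pthread.h>

    struct sketch_cq { pthread_mutex_t lock; };
    struct sketch_qp { pthread_mutex_t lock; struct sketch_cq *cq; };

    static void flush_qp(struct sketch_qp *qp)
    {
        pthread_mutex_lock(&qp->cq->lock);   /* cq lock first ... */
        pthread_mutex_lock(&qp->lock);       /* ... then qp lock  */

        /* move completions / mark the QP in error here */

        pthread_mutex_unlock(&qp->lock);
        pthread_mutex_unlock(&qp->cq->lock);
    }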
/drivers/infiniband/hw/cxgb4/
device.c:77 struct c4iw_qp *qp = p; local
82 if (id != qp->wq.sq.qid)
89 if (qp->ep)
91 "qp sq id %u rq id %u state %u onchip %u "
93 qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
94 qp->wq.sq.flags & T4_SQ_ONCHIP,
95 qp->ep->hwtid, (int)qp
[all...]
