Searched refs:qp (Results 1 - 25 of 172) sorted by relevance

/drivers/infiniband/hw/qib/
qib_qp.c
222 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) argument
224 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
226 unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
228 atomic_inc(&qp->refcount);
231 if (qp->ibqp.qp_num == 0)
232 rcu_assign_pointer(ibp->qp0, qp);
233 else if (qp->ibqp.qp_num == 1)
234 rcu_assign_pointer(ibp->qp1, qp);
236 qp
247 remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) argument
298 struct qib_qp *qp; local
340 struct qib_qp *qp = NULL; local
370 qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type) argument
417 clear_mr_refs(struct qib_qp *qp, int clr_sends) argument
473 qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) argument
572 struct qib_qp *qp = to_iqp(ibqp); local
857 struct qib_qp *qp = to_iqp(ibqp); local
910 qib_compute_aeth(struct qib_qp *qp) argument
978 struct qib_qp *qp; local
1191 struct qib_qp *qp = to_iqp(ibqp); local
1265 qib_get_credit(struct qib_qp *qp, u32 aeth) argument
1299 struct qib_qp *qp; member in struct:qib_qp_iter
1326 struct qib_qp *qp; local
1350 struct qib_qp *qp = iter->qp; local
[all...]
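
The insert_qp() hits above show the qib driver publishing the special QP0/QP1 pointers with rcu_assign_pointer() while holding a reference. A hedged sketch of the matching read side follows; the helper name lookup_qp0() is hypothetical, but the real driver's qib_lookup_qpn() has the same shape:

#include <linux/rcupdate.h>
#include <linux/atomic.h>

static struct qib_qp *lookup_qp0(struct qib_ibport *ibp)
{
	struct qib_qp *qp;

	rcu_read_lock();
	qp = rcu_dereference(ibp->qp0);	/* pairs with rcu_assign_pointer() above */
	if (qp)
		atomic_inc(&qp->refcount);	/* pin the QP before leaving the read side */
	rcu_read_unlock();
	return qp;
}
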
qib_rc.c
57 static void start_timer(struct qib_qp *qp) argument
59 qp->s_flags |= QIB_S_TIMER;
60 qp->s_timer.function = rc_timeout;
61 /* 4.096 usec. * (1 << qp->timeout) */
62 qp->s_timer.expires = jiffies + qp->timeout_jiffies;
63 add_timer(&qp->s_timer);
69 * @qp: a pointer to the QP
77 static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, argument
87 if (!(ib_qib_state_ops[qp
231 qib_make_rc_req(struct qib_qp *qp) argument
648 qib_send_rc_ack(struct qib_qp *qp) argument
783 reset_psn(struct qib_qp *qp, u32 psn) argument
868 qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) argument
905 struct qib_qp *qp = (struct qib_qp *)arg; local
928 struct qib_qp *qp = (struct qib_qp *)arg; local
944 reset_sending_psn(struct qib_qp *qp, u32 psn) argument
969 qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) argument
1044 update_last_psn(struct qib_qp *qp, u32 psn) argument
1054 do_rc_completion(struct qib_qp *qp, struct qib_swqe *wqe, struct qib_ibport *ibp) argument
1127 do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct qib_ctxtdata *rcd) argument
1350 rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, struct qib_ctxtdata *rcd) argument
1397 qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_other_headers *ohdr, void *data, u32 tlen, struct qib_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd) argument
1622 qib_rc_rcv_error(struct qib_other_headers *ohdr, void *data, struct qib_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd) argument
1819 qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) argument
1838 qib_update_ack_queue(struct qib_qp *qp, unsigned n) argument
1862 qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) argument
[all...]
qib_uc.c
42 * @qp: a pointer to the QP
46 int qib_make_uc_req(struct qib_qp *qp) argument
54 u32 pmtu = qp->pmtu;
57 spin_lock_irqsave(&qp->s_lock, flags);
59 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
60 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
63 if (qp->s_last == qp->s_head)
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp
242 qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) argument
[all...]
qib_ud.c
53 struct qib_qp *qp; local
62 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
63 if (!qp) {
70 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
71 IB_QPT_UD : qp->ibqp.qp_type;
74 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
82 if (qp->ibqp.qp_num > 1) {
88 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
94 sqp->ibqp.qp_num, qp->ibqp.qp_num,
106 if (qp
235 qib_make_ud_req(struct qib_qp *qp) argument
428 qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) argument
[all...]
qib_ruc.c
81 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) argument
89 rkt = &to_idev(qp->ibqp.device)->lk_table;
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
91 ss = &qp->r_sge;
92 ss->sg_list = qp->r_sg_list;
93 qp->r_len = 0;
101 qp->r_len += wqe->sg_list[i].length;
105 ss->total_len = qp
138 qib_get_rwqe(struct qib_qp *qp, int wr_id_only) argument
229 qib_migrate_qp(struct qib_qp *qp) argument
267 qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, struct qib_qp *qp, u32 bth0) argument
358 struct qib_qp *qp; local
674 qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, u32 bth0, u32 bth2) argument
719 struct qib_qp *qp = container_of(work, struct qib_qp, s_work); local
770 qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, enum ib_wc_status status) argument
[all...]
qib_verbs.c
333 * @qp: the QP to post on
336 static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, argument
349 spin_lock_irqsave(&qp->s_lock, flags);
352 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
356 if (wr->num_sge > qp->s_max_sge)
365 if (qib_fast_reg_mr(qp, wr))
367 } else if (qp->ibqp.qp_type == IB_QPT_UC) {
370 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
376 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
385 else if (wr->opcode >= IB_WR_RDMA_READ && !qp
465 struct qib_qp *qp = to_iqp(ibqp); local
496 struct qib_qp *qp = to_iqp(ibqp); local
559 qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) argument
614 struct qib_qp *qp; local
709 struct qib_qp *qp = NULL; local
927 __get_txreq(struct qib_ibdev *dev, struct qib_qp *qp) argument
958 get_txreq(struct qib_ibdev *dev, struct qib_qp *qp) argument
983 struct qib_qp *qp; local
1036 struct qib_qp *qp, *nqp; local
1081 struct qib_qp *qp = tx->qp; local
1111 wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) argument
1134 qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, u32 hdrwords, struct qib_sge_state *ss, u32 len, u32 plen, u32 dwords) argument
1238 no_bufs_available(struct qib_qp *qp) argument
1269 qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, u32 hdrwords, struct qib_sge_state *ss, u32 len, u32 plen, u32 dwords) argument
1376 qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, u32 hdrwords, struct qib_sge_state *ss, u32 len) argument
1510 struct qib_qp *qp; local
2327 qib_schedule_send(struct qib_qp *qp) argument
[all...]
qib_verbs_mcast.c
40 * @qp: the QP to link
42 static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) argument
50 mqp->qp = qp;
51 atomic_inc(&qp->refcount);
59 struct qib_qp *qp = mqp->qp; local
62 if (atomic_dec_and_test(&qp->refcount))
63 wake_up(&qp->wait);
182 if (p->qp
227 struct qib_qp *qp = to_iqp(ibqp); local
285 struct qib_qp *qp = to_iqp(ibqp); local
[all...]
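
qib_mcast_qp_alloc() and its free path above pin the QP with atomic_inc(&qp->refcount) and drop it with atomic_dec_and_test() plus wake_up(). A minimal sketch of that pattern, assuming the destroy path blocks on qp->wait until the count reaches zero:

#include <linux/atomic.h>
#include <linux/wait.h>

/* Drop one reference; the last dropper wakes any waiting destroyer. */
static void qp_put(struct qib_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/* Destroy path: sleep until every outstanding reference is gone. */
static void qp_wait_for_refs(struct qib_qp *qp)
{
	wait_event(qp->wait, !atomic_read(&qp->refcount));
}
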
/drivers/infiniband/hw/ipath/
ipath_rc.c
56 * ipath_init_restart- initialize the qp->s_sge after a restart
57 * @qp: the QP who's SGE we're restarting
62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) argument
66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
67 ib_mtu_enum_to_int(qp->path_mtu));
68 dev = to_idev(qp->ibqp.device);
70 if (list_empty(&qp->timerwait))
71 list_add_tail(&qp
86 ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp, struct ipath_other_headers *ohdr, u32 pmtu) argument
213 ipath_make_rc_req(struct ipath_qp *qp) argument
612 send_rc_ack(struct ipath_qp *qp) argument
724 reset_psn(struct ipath_qp *qp, u32 psn) argument
805 ipath_restart_rc(struct ipath_qp *qp, u32 psn) argument
841 update_last_psn(struct ipath_qp *qp, u32 psn) argument
857 do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode, u64 val) argument
1117 ipath_rc_rcv_resp(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, u32 tlen, struct ipath_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, int header_in_data) argument
1335 ipath_rc_rcv_error(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, struct ipath_qp *qp, u32 opcode, u32 psn, int diff, int header_in_data) argument
1525 ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) argument
1544 ipath_update_ack_queue(struct ipath_qp *qp, unsigned n) argument
1570 ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) argument
[all...]
ipath_qp.c
203 * @qp: the QP
209 static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, argument
218 qp->ibqp.qp_num = ret;
224 qp->next = qpt->table[ret];
225 qpt->table[ret] = qp;
226 atomic_inc(&qp->refcount);
238 * @qp: the QP to remove
243 static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) argument
251 qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
253 if (q == qp) {
274 struct ipath_qp *qp; local
304 struct ipath_qp *qp; local
324 ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type) argument
377 ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) argument
454 struct ipath_qp *qp = to_iqp(ibqp); local
624 struct ipath_qp *qp = to_iqp(ibqp); local
677 ipath_compute_aeth(struct ipath_qp *qp) argument
745 struct ipath_qp *qp; local
964 struct ipath_qp *qp = to_iqp(ibqp); local
1055 ipath_get_credit(struct ipath_qp *qp, u32 aeth) argument
[all...]
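
The ipath_alloc_qpn()/ipath_free_qp() hits show a simple chained hash table keyed by qp_num % qpt->max. A sketch of the unlink walk that line 243's ipath_free_qp() performs, with locking elided:

static void qpn_hash_unlink(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp **qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];

	for (; *qpp; qpp = &(*qpp)->next) {
		if (*qpp == qp) {
			*qpp = qp->next;	/* splice out of the bucket chain */
			qp->next = NULL;
			break;
		}
	}
}
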
ipath_ruc.c
80 * @qp: the QP
87 void ipath_insert_rnr_queue(struct ipath_qp *qp) argument
89 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
94 list_add(&qp->timerwait, &dev->rnrwait);
100 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
101 qp->s_rnr_timeout -= nqp->s_rnr_timeout;
111 nqp->s_rnr_timeout -= qp->s_rnr_timeout;
112 list_add(&qp->timerwait, l);
119 * @qp: the QP
123 int ipath_init_sge(struct ipath_qp *qp, struc argument
166 ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) argument
263 struct ipath_qp *qp; local
517 want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp) argument
541 ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) argument
600 ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp, struct ipath_other_headers *ohdr, u32 bth0, u32 bth2) argument
641 struct ipath_qp *qp = (struct ipath_qp *)data; local
699 ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, enum ib_wc_status status) argument
[all...]
ipath_uc.c
42 * @qp: a pointer to the QP
46 int ipath_make_uc_req(struct ipath_qp *qp) argument
54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
57 spin_lock_irqsave(&qp->s_lock, flags);
59 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
60 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
63 if (qp->s_last == qp->s_head)
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp
240 ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) argument
[all...]
ipath_ud.c
53 struct ipath_qp *qp; local
68 qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
69 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
79 if (unlikely(qp->ibqp.qp_num &&
81 sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
107 if (qp->ibqp.srq) {
108 srq = to_isrq(qp->ibqp.srq);
114 rq = &qp->r_rq;
134 rsge.sg_list = qp
241 ipath_make_ud_req(struct ipath_qp *qp) argument
409 ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) argument
[all...]
/drivers/ntb/
ntb_transport.c
84 struct ntb_transport_qp *qp; member in struct:ntb_queue_entry
109 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
119 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
203 #define QP_TO_MW(ndev, qp) ((qp) % ntb_max_mw(ndev))
398 struct ntb_transport_qp *qp; local
408 qp = filp->private_data;
413 "rx_bytes - \t%llu\n", qp->rx_bytes);
415 "rx_pkts - \t%llu\n", qp->rx_pkts);
417 "rx_memcpy - \t%llu\n", qp
503 struct ntb_transport_qp *qp = &nt->qps[qp_num]; local
598 ntb_qp_link_cleanup(struct ntb_transport_qp *qp) argument
617 struct ntb_transport_qp *qp = container_of(work, local
629 ntb_qp_link_down(struct ntb_transport_qp *qp) argument
793 struct ntb_transport_qp *qp = &nt->qps[i]; local
814 struct ntb_transport_qp *qp = container_of(work, local
856 struct ntb_transport_qp *qp; local
1026 struct ntb_transport_qp *qp = entry->qp; local
1057 struct ntb_transport_qp *qp = entry->qp; local
1136 ntb_process_rxc(struct ntb_transport_qp *qp) argument
1216 struct ntb_transport_qp *qp = data; local
1240 struct ntb_transport_qp *qp = entry->qp; local
1271 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) argument
1347 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) argument
1377 ntb_send_link_down(struct ntb_transport_qp *qp) argument
1429 struct ntb_transport_qp *qp; local
1509 ntb_transport_free_queue(struct ntb_transport_qp *qp) argument
1565 ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) argument
1598 ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) argument
1633 ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) argument
1668 ntb_transport_link_up(struct ntb_transport_qp *qp) argument
1688 ntb_transport_link_down(struct ntb_transport_qp *qp) argument
1726 ntb_transport_link_query(struct ntb_transport_qp *qp) argument
1743 ntb_transport_qp_num(struct ntb_transport_qp *qp) argument
1760 ntb_transport_max_size(struct ntb_transport_qp *qp) argument
[all...]
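
ntb_netdev.c (further down in these results) is the in-tree client of this queue API. A hedged transmit-side sketch using only the entry points whose signatures appear above; obtaining qp at queue-creation time is elided, and cb is the opaque cookie handed back to the tx handler:

#include <linux/errno.h>

static int ntb_send_buf(struct ntb_transport_qp *qp, void *cb,
			void *data, unsigned int len)
{
	if (!ntb_transport_link_query(qp))	/* peer link not up yet */
		return -ENOTCONN;
	if (len > ntb_transport_max_size(qp))	/* payload must fit one entry */
		return -EMSGSIZE;
	return ntb_transport_tx_enqueue(qp, cb, data, len);
}
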
/drivers/infiniband/hw/mthca/
mthca_qp.c
195 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) argument
197 return qp->qpn >= dev->qp_table.sqp_start &&
198 qp->qpn <= dev->qp_table.sqp_start + 3;
201 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) argument
203 return qp->qpn >= dev->qp_table.sqp_start &&
204 qp->qpn <= dev->qp_table.sqp_start + 1;
207 static void *get_recv_wqe(struct mthca_qp *qp, int n) argument
209 if (qp->is_direct)
210 return qp->queue.direct.buf + (n << qp
216 get_send_wqe(struct mthca_qp *qp, int n) argument
240 struct mthca_qp *qp; local
327 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
428 struct mthca_qp *qp = to_mqp(ibqp); local
548 struct mthca_qp *qp = to_mqp(ibqp); local
846 struct mthca_qp *qp = to_mqp(ibqp); local
911 mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) argument
945 mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) argument
970 mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) argument
1062 mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) argument
1071 mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1103 mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1112 mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1132 mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) argument
1141 mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp) argument
1230 mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) argument
1269 mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp) argument
1410 get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) argument
1421 mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) argument
1606 struct mthca_qp *qp = to_mqp(ibqp); local
1809 struct mthca_qp *qp = to_mqp(ibqp); local
1920 struct mthca_qp *qp = to_mqp(ibqp); local
2160 struct mthca_qp *qp = to_mqp(ibqp); local
2227 mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, int index, int *dbd, __be32 *new_wqe) argument
[all...]
/drivers/infiniband/hw/mlx4/
qp.c
43 #include <linux/mlx4/qp.h>
75 struct mlx4_ib_qp qp; member in struct:mlx4_ib_sqp
127 return container_of(mqp, struct mlx4_ib_sqp, qp);
130 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
135 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
136 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
140 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
147 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
148 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
154 if (qp
165 is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
188 get_wqe(struct mlx4_ib_qp *qp, int offset) argument
193 get_recv_wqe(struct mlx4_ib_qp *qp, int n) argument
198 get_send_wqe(struct mlx4_ib_qp *qp, int n) argument
212 stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) argument
242 post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) argument
280 pad_wraparound(struct mlx4_ib_qp *qp, int ind) argument
290 mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) argument
382 set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) argument
420 set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) argument
535 set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) argument
555 alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) argument
590 free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) argument
629 struct mlx4_ib_qp *qp; local
920 del_gid_entries(struct mlx4_ib_qp *qp) argument
930 get_pd(struct mlx4_ib_qp *qp) argument
938 get_cqs(struct mlx4_ib_qp *qp, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) argument
957 destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, int is_user) argument
1056 struct mlx4_ib_qp *qp = NULL; local
1146 mlx4_ib_destroy_qp(struct ib_qp *qp) argument
1196 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
1356 mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) argument
1367 mlx4_set_alt_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) argument
1380 update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) argument
1392 handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, struct mlx4_qp_context *context) argument
1420 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
1887 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
2545 build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) argument
2591 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
2898 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
3053 struct mlx4_ib_qp *qp = to_mqp(ibqp); local
[all...]
/drivers/infiniband/hw/ehca/
ehca_uverbs.c
198 static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, argument
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
208 ehca_err(qp->ib_qp.device,
210 ret, qp->ib_qp.qp_num);
215 case 1: /* qp rqueue_addr */
216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp
256 struct ehca_qp *qp; local
[all...]
/drivers/infiniband/hw/amso1100/
c2_qp.c
120 void c2_set_qp_state(struct c2_qp *qp, int c2_state) argument
124 pr_debug("%s: qp[%p] state modify %s --> %s\n",
126 qp,
127 to_ib_state_str(qp->state),
129 qp->state = new_state;
134 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, argument
144 pr_debug("%s:%d qp=%p, %s --> %s\n",
146 qp,
147 to_ib_state_str(qp->state),
157 wr.qp_handle = qp
253 c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, int ord, int ird) argument
302 destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) argument
381 c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) argument
407 struct c2_qp *qp; local
415 c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd, struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) argument
600 c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) argument
762 qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) argument
794 struct c2_qp *qp = to_c2qp(ibqp); local
948 struct c2_qp *qp = to_c2qp(ibqp); local
[all...]
c2_ae.c
186 struct c2_qp *qp = (struct c2_qp *)resource_user_context; local
187 struct iw_cm_id *cm_id = qp->cm_id;
191 pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
192 qp);
205 c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
220 spin_lock_irqsave(&qp->lock, flags);
221 if (qp->cm_id) {
222 qp->cm_id->rem_ref(qp->cm_id);
223 qp
[all...]
/drivers/scsi/bnx2i/
bnx2i_hwi.c
153 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
170 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
171 if (cq_index > ep->qp.cqe_size * 2)
172 cq_index -= ep->qp.cqe_size * 2;
195 if (!bnx2i_conn->ep->qp.rqe_left)
198 bnx2i_conn->ep->qp.rqe_left--;
199 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
200 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
201 bnx2i_conn->ep->qp
1981 struct qp_info *qp; local
[all...]
/drivers/infiniband/hw/mlx5/
qp.c
98 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) argument
100 return mlx5_buf_offset(&qp->buf, offset);
103 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) argument
105 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
108 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) argument
110 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
113 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, in argument
158 set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) argument
268 calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) argument
312 set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) argument
539 create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct mlx5_create_qp_mbox_in **in, struct mlx5_ib_create_qp_resp *resp, int *inlen) argument
661 destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp) argument
672 create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx5_ib_qp *qp, struct mlx5_create_qp_mbox_in **in, int *inlen) argument
774 destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) argument
786 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) argument
805 create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct mlx5_ib_qp *qp) argument
1043 get_pd(struct mlx5_ib_qp *qp) argument
1048 get_cqs(struct mlx5_ib_qp *qp, struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) argument
1082 destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) argument
1157 struct mlx5_ib_qp *qp; local
1235 mlx5_ib_destroy_qp(struct ib_qp *qp) argument
1247 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) argument
1496 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
1692 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
1970 set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, void *wqe, int *sz) argument
2116 set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size) argument
2249 set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size) argument
2325 set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) argument
2357 dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) argument
2377 mlx5_bf_copy(u64 __iomem *dst, u64 *src, unsigned bytecnt, struct mlx5_ib_qp *qp) argument
2412 begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, struct ib_send_wr *wr, int *idx, int *size, int nreq) argument
2441 finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, u8 size, unsigned idx, u64 wr_id, int nreq, u8 fence, u8 next_fence, u32 mlx5_opcode) argument
2471 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
2771 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
2910 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
[all...]
/drivers/infiniband/core/
verbs.c
377 struct ib_qp *qp = context; local
380 spin_lock_irqsave(&qp->device->event_handler_lock, flags);
381 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
382 if (event->element.qp->event_handler)
383 event->element.qp->event_handler(event, event->element.qp->qp_context);
384 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
387 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) argument
390 list_add(&qp
398 struct ib_qp *qp; local
423 struct ib_qp *qp, *real_qp; local
445 struct ib_qp *qp, *real_qp; local
867 ib_resolve_eth_l2_attrs(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int *qp_attr_mask) argument
902 ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) argument
916 ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) argument
927 ib_close_qp(struct ib_qp *qp) argument
947 __ib_destroy_shared_qp(struct ib_qp *qp) argument
975 ib_destroy_qp(struct ib_qp *qp) argument
1343 ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) argument
1359 ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) argument
1397 struct ib_qp *qp; local
1414 ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain) argument
1432 struct ib_qp *qp = flow_id->qp; local
[all...]
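
The ib_modify_qp() hit at line 902 is the core entry point every consumer uses to walk a QP through its state machine. A hedged sketch of the canonical RESET to INIT transition, using the attribute mask the IB spec requires for that step:

#include <rdma/ib_verbs.h>

static int qp_to_init(struct ib_qp *qp, u8 port)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	/* The mask names which attributes are valid for this transition. */
	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
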
/drivers/net/ethernet/mellanox/mlx5/core/
qp.c
37 #include <linux/mlx5/qp.h>
73 struct mlx5_core_qp *qp; local
80 qp = (struct mlx5_core_qp *)common;
81 qp->event(qp, event_type);
92 struct mlx5_core_qp *qp,
117 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
118 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
120 qp->common.res = MLX5_RES_QP;
122 err = radix_tree_insert(&table->tree, qp
91 mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_create_qp_mbox_in *in, int inlen) argument
152 mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) argument
186 mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, enum mlx5_qp_state new_state, struct mlx5_modify_qp_mbox_in *in, int sqd_event, struct mlx5_core_qp *qp) argument
262 mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_query_qp_mbox_out *out, int outlen) argument
[all...]
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1020 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) argument
1024 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1025 dev->qp_tbl[qp->id] = qp;
1031 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) argument
1033 dev->qp_tbl[qp->id] = NULL;
1043 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1109 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, argument
1116 struct ocrdma_dev *dev = qp
1168 ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, struct ocrdma_pd *pd) argument
1188 ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp) argument
1203 ocrdma_set_qp_init_params(struct ocrdma_qp *qp, struct ocrdma_pd *pd, struct ib_qp_init_attr *attrs) argument
1237 struct ocrdma_qp *qp; local
1310 struct ocrdma_qp *qp; local
1333 struct ocrdma_qp *qp; local
1404 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); local
1496 is_hw_sq_empty(struct ocrdma_qp *qp) argument
1501 is_hw_rq_empty(struct ocrdma_qp *qp) argument
1528 ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) argument
1593 ocrdma_del_flush_qp(struct ocrdma_qp *qp) argument
1616 struct ocrdma_qp *qp; local
1818 ocrdma_build_ud_hdr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr) argument
1861 ocrdma_build_inline_sges(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ocrdma_sge *sge, struct ib_send_wr *wr, u32 wqe_size) argument
1901 ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr) argument
1920 ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr) argument
1938 ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr) argument
2002 ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr) argument
2045 ocrdma_ring_sq_db(struct ocrdma_qp *qp) argument
2056 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); local
2157 ocrdma_ring_rq_db(struct ocrdma_qp *qp) argument
2190 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); local
2368 ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, u32 wqe_idx) argument
2406 ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe) argument
2438 ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) argument
2460 ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) argument
2470 ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) argument
2480 ocrdma_poll_err_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop) argument
2521 ocrdma_poll_success_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled) argument
2547 ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop) argument
2579 ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp) argument
2600 ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop, int status) argument
2631 ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc) argument
2662 ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc, bool *polled, bool *stop) argument
2707 struct ocrdma_qp *qp = NULL; local
2768 ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, struct ocrdma_qp *qp, struct ib_wc *ibwc) argument
2800 struct ocrdma_qp *qp; local
[all...]
/drivers/net/
ntb_netdev.c
65 struct ntb_transport_qp *qp; member in struct:ntb_netdev
79 ntb_transport_link_query(dev->qp));
86 if (!ntb_transport_link_query(dev->qp))
96 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, argument
128 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
136 static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, argument
165 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
191 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
200 ntb_transport_link_up(dev->qp);
205 while ((skb = ntb_transport_rx_remove(dev->qp,
[all...]
/drivers/net/ethernet/mellanox/mlx4/
qp.c
40 #include <linux/mlx4/qp.h>
48 struct mlx4_qp *qp; local
52 qp = __mlx4_qp_lookup(dev, qpn);
53 if (qp)
54 atomic_inc(&qp->refcount);
58 if (!qp) {
63 qp->event(qp, event_type);
65 if (atomic_dec_and_test(&qp->refcount))
66 complete(&qp
70 is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) argument
83 __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp, int native) argument
198 mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp) argument
358 mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) argument
435 mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) argument
446 mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) argument
583 mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, struct mlx4_qp_context *context) argument
604 mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_qp_context *context, struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) argument
[all...]
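
The mlx4_qp_alloc()/mlx4_qp_remove()/mlx4_qp_free() hits above outline the low-level QP lifecycle this file exports to the mlx4_ib driver. A rough sketch, assuming the QPN was already reserved elsewhere in the driver and skipping the state transitions:

static int demo_qp_lifecycle(struct mlx4_dev *dev, int qpn,
			     struct mlx4_qp *qp)
{
	int err;

	err = mlx4_qp_alloc(dev, qpn, qp, GFP_KERNEL);	/* bind QPN, hook lookup tree */
	if (err)
		return err;

	/* ... mlx4_qp_modify() walks the QP through RST->INIT->RTR->RTS ... */

	mlx4_qp_remove(dev, qp);	/* unhook from the lookup table */
	mlx4_qp_free(dev, qp);		/* wait for refs, release HW resources */
	return 0;
}
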
