/drivers/infiniband/hw/amso1100/

c2_vq.h
     47  struct c2_qp *qp;   [member of struct c2_vq_req]

c2_cm.c
     45  struct c2_qp *qp;   [local]
     57  qp = to_c2qp(ibqp);
     60  cm_id->provider_data = qp;
     62  qp->cm_id = cm_id;
     74  err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
     96  wr->qp_handle = qp->adapter_handle;
    129  qp->cm_id = NULL;
    294  struct c2_qp *qp;   [local]
    304  qp = to_c2qp(ibqp);
    307  err = c2_qp_set_read_limits(c2dev, qp, iw_para
    [all...]

c2_ae.c
    186  struct c2_qp *qp = (struct c2_qp *)resource_user_context;   [local]
    187  struct iw_cm_id *cm_id = qp->cm_id;
    191  pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
    192          qp);
    205  c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
    220  spin_lock_irqsave(&qp->lock, flags);
    221  if (qp->cm_id) {
    222  qp->cm_id->rem_ref(qp->cm_id);
    223  qp
    [all...]

c2_cq.c
     82  void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)   [argument]
    100  if (msg->qp_user_context == (u64) (unsigned long) qp) {
    135  struct c2_qp *qp;   [local]
    144  * if the qp returned is null then this qp has already
    148  while ((qp =
    158  entry->qp = &qp->ibqp;
    190  c2_mq_lconsume(&qp->rq_mq, 1);
    192  c2_mq_lconsume(&qp
    [all...]

c2_qp.c
    120  void c2_set_qp_state(struct c2_qp *qp, int c2_state)   [argument]
    124  pr_debug("%s: qp[%p] state modify %s --> %s\n",
    126  qp,
    127  to_ib_state_str(qp->state),
    129  qp->state = new_state;
    134  int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,   [argument]
    144  pr_debug("%s:%d qp=%p, %s --> %s\n",
    146  qp,
    147  to_ib_state_str(qp->state),
    157  wr.qp_handle = qp
    253  c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, int ord, int ird)   [argument]
    302  destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)   [argument]
    381  c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)   [argument]
    407  struct c2_qp *qp;   [local]
    415  c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd, struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)   [argument]
    600  c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)   [argument]
    762  qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)   [argument]
    794  struct c2_qp *qp = to_c2qp(ibqp);   [local]
    948  struct c2_qp *qp = to_c2qp(ibqp);   [local]
    [all...]
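
The amso1100 matches above trace one recurring pattern: the iWARP connection id (iw_cm_id) and the QP point at each other, and async-event teardown clears the link under qp->lock. A minimal sketch of that cross-linking, assuming the add_ref/rem_ref reference hooks of struct iw_cm_id and the qp->cm_id and qp->lock fields shown in the excerpts (struct c2_qp itself comes from the driver's local headers, not shown here):

#include <linux/spinlock.h>
#include <rdma/iw_cm.h>

/* Connect path: let each side find the other. */
static void example_link_cm_qp(struct iw_cm_id *cm_id, struct c2_qp *qp)
{
	cm_id->provider_data = qp;	/* cm_id -> QP */
	cm_id->add_ref(cm_id);		/* hold the cm_id while linked */
	qp->cm_id = cm_id;		/* QP -> cm_id */
}

/* Teardown (e.g. on an async close event): drop the reference once. */
static void example_unlink_cm_qp(struct c2_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);
}

Taking the lock around the NULL check makes the unlink idempotent, which matters when both the AE handler and QP destroy can race to tear the link down.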

/drivers/infiniband/hw/ehca/

hipz_fns_core.h
     61  static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)   [argument]
     64  hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
     68  static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)   [argument]
     71  hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,

ehca_cq.c
     55  int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)   [argument]
     57  unsigned int qp_num = qp->real_qp_num;
     62  hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
     76  struct ehca_qp *qp;   [local]
     81  qp = hlist_entry(iter, struct ehca_qp, list_entries);
     82  if (qp->real_qp_num == real_qp_num) {
     85  "removed qp from cq .cq_num=%x real_qp_num=%x",
     94  "qp not found cq_num=%x real_qp_num=%x",
    105  struct ehca_qp *qp;   [local]
    107  qp
    [all...]

ehca_uverbs.c
    198  static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,   [argument]
    205  ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
    206  ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
    208  ehca_err(qp->ib_qp.device,
    210  ret, qp->ib_qp.qp_num);
    215  case 1: /* qp rqueue_addr */
    216  ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp
    256  struct ehca_qp *qp;   [local]
    [all...]

/drivers/net/ethernet/mellanox/mlx4/

en_resources.c
     36  #include <linux/mlx4/qp.h>
     74  en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
    111  void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)   [argument]

/drivers/infiniband/core/

iwcm.h
     50  struct ib_qp *qp;   [member of struct iwcm_id_private]

iwcm.c
    245  static int iwcm_modify_qp_err(struct ib_qp *qp)   [argument]
    249  if (!qp)
    253  return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
    260  static int iwcm_modify_qp_sqd(struct ib_qp *qp)   [argument]
    264  BUG_ON(qp == NULL);
    266  return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
    286  struct ib_qp *qp = NULL;   [local]
    299  if (cm_id_priv->qp)
    300  qp = cm_id_priv->qp;
    516  struct ib_qp *qp;   [local]
    573  struct ib_qp *qp;   [local]
    [all...]
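
The iwcm.c excerpts show the core of iWARP connection teardown: moving a QP into the error state through the standard ib_modify_qp() verb with only the state attribute set. A minimal sketch of that helper, following lines 245-253 above:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int example_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	/* Only qp_state changes, so the attribute mask is IB_QP_STATE alone. */
	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

The SQD variant at lines 260-266 is the same call with IB_QPS_SQD requested instead, draining the send queue rather than flushing it in error.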

/drivers/infiniband/hw/ipath/

ipath_keys.c
    121  int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,   [argument]
    124  struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
    136  struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
    151  qp->ibqp.pd != mr->pd)) {
    199  int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,   [argument]
    202  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
    216  struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
    234  qp->ibqp.pd != mr->pd)) {

ipath_uc.c
     42  * @qp: a pointer to the QP
     46  int ipath_make_uc_req(struct ipath_qp *qp)   [argument]
     54  u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
     57  spin_lock_irqsave(&qp->s_lock, flags);
     59  if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
     60  if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
     63  if (qp->s_last == qp->s_head)
     66  if (atomic_read(&qp->s_dma_busy)) {
     67  qp
    240  ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp)   [argument]
    [all...]

ipath_verbs_mcast.c
     51  * @qp: the QP to link
     53  static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)   [argument]
     61  mqp->qp = qp;
     62  atomic_inc(&qp->refcount);
     70  struct ipath_qp *qp = mqp->qp;   [local]
     73  if (atomic_dec_and_test(&qp->refcount))
     74  wake_up(&qp->wait);
    193  if (p->qp
    238  struct ipath_qp *qp = to_iqp(ibqp);   [local]
    287  struct ipath_qp *qp = to_iqp(ibqp);   [local]
    [all...]
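
The alloc/free pair at lines 53-74 is a small reference-counting idiom: every multicast attachment pins the QP, and the final detach wakes whoever is blocked in QP destroy. A sketch of the pattern with illustrative names (the mcast list handling is elided; qp->refcount and qp->wait are the fields visible in the excerpt):

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "ipath_verbs.h"	/* struct ipath_qp (driver-local) */

/* One multicast attachment of a QP (illustrative). */
struct example_mcast_qp {
	struct ipath_qp *qp;
};

static struct example_mcast_qp *example_mcast_qp_alloc(struct ipath_qp *qp)
{
	struct example_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		return NULL;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);	/* hold the QP while attached */
	return mqp;
}

static void example_mcast_qp_free(struct example_mcast_qp *mqp)
{
	struct ipath_qp *qp = mqp->qp;

	/* Notify the QP-destroy path if it is waiting on this count. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}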

ipath_ruc.c
     80  * @qp: the QP
     87  void ipath_insert_rnr_queue(struct ipath_qp *qp)   [argument]
     89  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
     94  list_add(&qp->timerwait, &dev->rnrwait);
    100  while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
    101  qp->s_rnr_timeout -= nqp->s_rnr_timeout;
    111  nqp->s_rnr_timeout -= qp->s_rnr_timeout;
    112  list_add(&qp->timerwait, l);
    119  * @qp: the QP
    123  int ipath_init_sge(struct ipath_qp *qp, struc
    166  ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)   [argument]
    263  struct ipath_qp *qp;   [local]
    517  want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)   [argument]
    541  ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)   [argument]
    600  ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp, struct ipath_other_headers *ohdr, u32 bth0, u32 bth2)   [argument]
    641  struct ipath_qp *qp = (struct ipath_qp *)data;   [local]
    699  ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, enum ib_wc_status status)   [argument]
    [all...]
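
ipath_insert_rnr_queue() (lines 87-112) maintains a classic delta queue: QPs waiting out an RNR NAK sit on a list ordered by expiry, each entry storing only the delta from its predecessor, so the periodic tick only decrements the head's counter. A sketch of the insertion logic under that reading of the excerpt (locking elided, names as in the driver):

#include <linux/list.h>

#include "ipath_verbs.h"	/* struct ipath_qp (driver-local) */

static void example_insert_rnr(struct list_head *rnrwait, struct ipath_qp *qp)
{
	struct list_head *l = rnrwait;
	struct ipath_qp *nqp;

	if (list_empty(rnrwait)) {
		list_add(&qp->timerwait, rnrwait);
		return;
	}

	/* Convert qp's timeout into a delta while walking past
	 * entries that expire sooner. */
	nqp = list_entry(l->next, struct ipath_qp, timerwait);
	while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
		qp->s_rnr_timeout -= nqp->s_rnr_timeout;
		l = l->next;
		if (l->next == rnrwait) {
			nqp = NULL;	/* inserting at the tail */
			break;
		}
		nqp = list_entry(l->next, struct ipath_qp, timerwait);
	}

	/* The entry now behind qp waits relative to qp's expiry. */
	if (nqp)
		nqp->s_rnr_timeout -= qp->s_rnr_timeout;
	list_add(&qp->timerwait, l);
}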

ipath_ud.c
     53  struct ipath_qp *qp;   [local]
     68  qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
     69  if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
     79  if (unlikely(qp->ibqp.qp_num &&
     81  sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
    107  if (qp->ibqp.srq) {
    108  srq = to_isrq(qp->ibqp.srq);
    114  rq = &qp->r_rq;
    134  rsge.sg_list = qp
    241  ipath_make_ud_req(struct ipath_qp *qp)   [argument]
    409  ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp)   [argument]
    [all...]

/drivers/infiniband/hw/mthca/

mthca_mcg.c
     43  __be32 qp[MTHCA_QP_PER_MGM];   [member of struct mthca_mgm]
    165  if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) {
    170  } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) {
    171  mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31));
    242  if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31)))
    244  if (!(mgm->qp[i] & cpu_to_be32(1 << 31)))
    254  mgm->qp[loc] = mgm->qp[i - 1];
    255  mgm->qp[i - 1] = 0;
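
In the mthca multicast group (MGM) entries above, each slot of mgm->qp[] is a big-endian word in which bit 31 marks the slot in use and the low bits carry the QP number, which is why attach and detach keep comparing against cpu_to_be32(qp_num | (1 << 31)). Small helpers capturing that encoding (illustrative names, not from the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

static inline __be32 example_mgm_entry(u32 qpn)
{
	return cpu_to_be32(qpn | (1 << 31));	/* bit 31 = slot valid */
}

static inline bool example_mgm_entry_valid(__be32 entry)
{
	return !!(entry & cpu_to_be32(1 << 31));
}

static inline bool example_mgm_entry_is(__be32 entry, u32 qpn)
{
	return entry == cpu_to_be32(qpn | (1 << 31));
}

Detach (lines 242-255) removes a member by copying the last valid entry over the vacated slot and zeroing the old tail, keeping the valid slots densely packed at the front of the array.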

/drivers/infiniband/hw/qib/

qib_uc.c
     42  * @qp: a pointer to the QP
     46  int qib_make_uc_req(struct qib_qp *qp)   [argument]
     54  u32 pmtu = qp->pmtu;
     57  spin_lock_irqsave(&qp->s_lock, flags);
     59  if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
     60  if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
     63  if (qp->s_last == qp->s_head)
     66  if (atomic_read(&qp->s_dma_busy)) {
     67  qp
    242  qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp)   [argument]
    [all...]

qib_verbs_mcast.c
     40  * @qp: the QP to link
     42  static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)   [argument]
     50  mqp->qp = qp;
     51  atomic_inc(&qp->refcount);
     59  struct qib_qp *qp = mqp->qp;   [local]
     62  if (atomic_dec_and_test(&qp->refcount))
     63  wake_up(&qp->wait);
    182  if (p->qp
    227  struct qib_qp *qp = to_iqp(ibqp);   [local]
    285  struct qib_qp *qp = to_iqp(ibqp);   [local]
    [all...]

qib_keys.c
    237  * @qp: qp for validation
    248  int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,   [argument]
    251  struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
    262  struct qib_pd *pd = to_ipd(qp->ibqp.pd);
    285  if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
    337  int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)   [argument]
    339  struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
    340  struct qib_pd *pd = to_ipd(qp->ibqp.pd);
    356  if (unlikely(mr == NULL || qp
    [all...]

qib_ruc.c
     81  static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)   [argument]
     89  rkt = &to_idev(qp->ibqp.device)->lk_table;
     90  pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
     91  ss = &qp->r_sge;
     92  ss->sg_list = qp->r_sg_list;
     93  qp->r_len = 0;
    101  qp->r_len += wqe->sg_list[i].length;
    105  ss->total_len = qp
    138  qib_get_rwqe(struct qib_qp *qp, int wr_id_only)   [argument]
    229  qib_migrate_qp(struct qib_qp *qp)   [argument]
    267  qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, struct qib_qp *qp, u32 bth0)   [argument]
    358  struct qib_qp *qp;   [local]
    674  qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, u32 bth0, u32 bth2)   [argument]
    719  struct qib_qp *qp = container_of(work, struct qib_qp, s_work);   [local]
    770  qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, enum ib_wc_status status)   [argument]
    [all...]

qib_ud.c
     53  struct qib_qp *qp;   [local]
     62  qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
     63  if (!qp) {
     70  dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
     71  IB_QPT_UD : qp->ibqp.qp_type;
     74  !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
     82  if (qp->ibqp.qp_num > 1) {
     88  pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
     94  sqp->ibqp.qp_num, qp->ibqp.qp_num,
    106  if (qp
    235  qib_make_ud_req(struct qib_qp *qp)   [argument]
    428  qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp)   [argument]
    [all...]

/drivers/media/pci/solo6x10/

solo6x10-enc.c
    181  solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch, unsigned int qp)   [argument]
    182  unsigned int qp)
    187  if ((ch > 31) || (qp > 3))
    206  solo_dev->jpeg_qp[idx] |= (qp & 3) << ch;
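
solo_s_jpeg_qp() validates ch <= 31 and qp <= 3, then ORs the 2-bit value into jpeg_qp[idx]. A sketch of that 2-bit-per-channel packing, assuming sixteen channels per 32-bit word (the idx and shift math is inferred, since the excerpt is truncated):

#include <linux/errno.h>
#include <linux/types.h>

static int example_set_jpeg_qp(u32 jpeg_qp[2], unsigned int ch,
			       unsigned int qp)
{
	unsigned int idx, shift;

	if (ch > 31 || qp > 3)
		return -EINVAL;

	idx = ch / 16;		/* which 32-bit word holds this channel */
	shift = (ch % 16) * 2;	/* two bits per channel */

	jpeg_qp[idx] &= ~(3U << shift);		/* clear the old QP value */
	jpeg_qp[idx] |= (qp & 3) << shift;	/* set the new one */
	return 0;
}

Clearing before setting makes the helper safe for updates, not just first writes; the visible line 206 shows only the OR half of that read-modify-write.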

/drivers/net/ethernet/mellanox/mlx5/core/

qp.c
     37  #include <linux/mlx5/qp.h>
     73  struct mlx5_core_qp *qp;   [local]
     80  qp = (struct mlx5_core_qp *)common;
     81  qp->event(qp, event_type);
     91  mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_create_qp_mbox_in *in, int inlen)   [argument]
     92  struct mlx5_core_qp *qp,
    117  qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
    118  mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
    120  qp->common.res = MLX5_RES_QP;
    122  err = radix_tree_insert(&table->tree, qp
    152  mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)   [argument]
    186  mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, enum mlx5_qp_state new_state, struct mlx5_modify_qp_mbox_in *in, int sqd_event, struct mlx5_core_qp *qp)   [argument]
    262  mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_query_qp_mbox_out *out, int outlen)   [argument]
    [all...]
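
mlx5_core_create_qp() reads the firmware-assigned QP number out of the mailbox, masks it to 24 bits (line 117), and then, per line 122, inserts the QP into a radix tree so the event handler at lines 73-81 can dispatch events by QPN. A sketch of that qpn-to-qp mapping, with the locking elided (the real driver serializes tree updates):

#include <linux/radix-tree.h>
#include <linux/mlx5/qp.h>

static int example_qp_table_insert(struct radix_tree_root *tree,
				   struct mlx5_core_qp *qp)
{
	/* Key the tree by the 24-bit QP number. */
	return radix_tree_insert(tree, qp->qpn, qp);
}

static struct mlx5_core_qp *example_qp_table_find(struct radix_tree_root *tree,
						  u32 qpn)
{
	return radix_tree_lookup(tree, qpn);
}

A radix tree suits this lookup because QPNs are dense small integers, so resolving an interrupt-time event to its QP is a cheap indexed walk rather than a hash or list scan.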

/drivers/net/

ntb_netdev.c
     65  struct ntb_transport_qp *qp;   [member of struct ntb_netdev]
     79  ntb_transport_link_query(dev->qp));
     86  if (!ntb_transport_link_query(dev->qp))
     96  static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,   [argument]
    128  rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
    136  static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,   [argument]
    165  rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
    191  rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
    200  ntb_transport_link_up(dev->qp);
    205  while ((skb = ntb_transport_rx_remove(dev->qp,
    [all...]
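
ntb_netdev's open path (lines 191-200) pre-posts a pool of receive skbs to the transport queue before raising the link, so the peer has buffers to land incoming frames in. A condensed sketch of that loop; the pool size is illustrative, the transport header name varies by tree, and the error handling is simplified (the real driver unwinds already-posted buffers with ntb_transport_rx_remove(), as line 205 hints):

#include <linux/etherdevice.h>
#include <linux/ntb.h>		/* assumption: ntb_transport_* API header */

/* Illustrative receive-pool size, not the driver's actual constant. */
#define EXAMPLE_NTB_RX_BUFS 100

struct example_ntb_netdev {
	struct net_device *ndev;
	struct ntb_transport_qp *qp;
};

static int example_ntb_open(struct net_device *ndev)
{
	struct example_ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int i, rc;

	for (i = 0; i < EXAMPLE_NTB_RX_BUFS; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb)
			return -ENOMEM;

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			return rc;	/* real driver also drains posted bufs */
		}
	}

	ntb_transport_link_up(dev->qp);	/* buffers ready; raise the link */
	return 0;
}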