/drivers/infiniband/hw/qib/

qib_keys.c
    226  int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, ...    [qp: argument]
    229  struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
    241  struct qib_pd *pd = to_ipd(qp->ibqp.pd);
    261  if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
    312  int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)    [qp: argument]
    314  struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
    315  struct qib_pd *pd = to_ipd(qp->ibqp.pd);
    329  if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
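
The checks at lines 261 and 329 are the heart of this file: a key is honored
only if the lookup actually found a memory region, the region's key matches
the one the requester presented, and the region belongs to the same
protection domain as the QP. A minimal self-contained sketch of that rule,
using illustrative stand-in types rather than the qib structures:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-ins for the driver's PD/MR bookkeeping; illustrative only. */
    struct demo_pd { int id; };
    struct demo_mr { uint32_t lkey; struct demo_pd *pd; };

    /* Mirrors the shape of the tests at lines 261/329: reject a missed
     * lookup, a key mismatch, or a protection-domain mismatch. */
    static bool demo_rkey_ok(struct demo_mr *mr, uint32_t rkey,
                             struct demo_pd *qp_pd)
    {
            return !(mr == NULL || mr->lkey != rkey || mr->pd != qp_pd);
    }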

/drivers/net/ethernet/mellanox/mlx4/

en_resources.c
    36   #include <linux/mlx4/qp.h>
    98   void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)    [qp: argument]

resource_tracker.c
    43    #include <linux/mlx4/qp.h>
    710   qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
    711                        enum res_qp_states state, struct res_qp **qp, int alloc)    [qp: argument]
    765   if (qp)
    766   *qp = (struct res_qp *)r;
    1751  struct res_qp *qp;    [local]
    1764  err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
    1767  qp->local_qpn = local_qpn;
    1798  qp->mtt = mtt;
    1800  qp->rcq = rcq;
    1802  qp ...    (snippet truncated)
    2410  struct res_qp *qp;    [local]
    2447  struct res_qp *qp;    [local]
    2536  struct mlx4_qp qp; /* dummy for calling attach/detach */    [local]
    2608  struct mlx4_qp qp; /* dummy for calling attach/detach */    [local]
    2681  struct res_qp *qp;    [local]
    (further matches omitted)

/drivers/infiniband/hw/ehca/

ehca_classes.h
    153  /* struct to cache modify_qp()'s parms for GSI/SMI qp */
    211  /* array to cache modify_qp()'s parms for GSI/SMI qp */
    230  #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
    231  #define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
    232  #define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
    478  int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
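
The three ext_type helpers above classify a QP by which queues it really
owns: a shared receive queue has no send queue, and an SRQ-base QP receives
through the shared queue instead of its own. A self-contained sketch of the
same test pattern; the enum values are illustrative, not ehca's definitions:

    #include <stdio.h>

    /* Illustrative ext_type values; ehca defines its own enum. */
    enum demo_eqpt { EQPT_NORMAL, EQPT_SRQBASE, EQPT_SRQ };

    struct demo_qp { enum demo_eqpt ext_type; };

    /* Same shape as the ehca_classes.h helpers above. */
    #define IS_SRQ(qp) ((qp)->ext_type == EQPT_SRQ)
    #define HAS_SQ(qp) ((qp)->ext_type != EQPT_SRQ)
    #define HAS_RQ(qp) ((qp)->ext_type != EQPT_SRQBASE)

    int main(void)
    {
            struct demo_qp srq = { EQPT_SRQ }, base = { EQPT_SRQBASE };

            /* An SRQ keeps a receive queue but has no send queue... */
            printf("srq:  sq=%d rq=%d\n", HAS_SQ(&srq), HAS_RQ(&srq));
            /* ...an SRQ-base QP sends, but receives via the shared queue. */
            printf("base: sq=%d rq=%d\n", HAS_SQ(&base), HAS_RQ(&base));
            return 0;
    }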

/drivers/net/ethernet/sun/

sunqe.h
    298  #define TX_BUFFS_AVAIL(qp) \
    299      (((qp)->tx_old <= (qp)->tx_new) ? \
    300       (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
    301       (qp)->tx_old - (qp)->tx_new - 1)
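
The macro computes the number of free transmit descriptors in a circular
ring while keeping one slot permanently unused, so that tx_old == tx_new can
only mean "empty" and never "full". A standalone rendering of the same
arithmetic; TX_RING_SIZE here is illustrative, not the driver's value:

    #include <stdio.h>

    #define TX_RING_SIZE 16  /* illustrative ring size */

    /* Same arithmetic as the sunqe.h macro, with the two ring indices
     * passed directly: one slot is sacrificed to disambiguate full/empty. */
    static int tx_buffs_avail(int tx_old, int tx_new)
    {
            return (tx_old <= tx_new)
                    ? tx_old + (TX_RING_SIZE - 1) - tx_new
                    : tx_old - tx_new - 1;
    }

    int main(void)
    {
            printf("%d\n", tx_buffs_avail(0, 0));  /* empty ring: 15 free */
            printf("%d\n", tx_buffs_avail(3, 2));  /* wrapped, full: 0 free */
            printf("%d\n", tx_buffs_avail(4, 9));  /* 4 + 15 - 9 = 10 free */
            return 0;
    }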

sunhme.c
    2126  struct quattro *qp = (struct quattro *) cookie;    [local]
    2130  struct net_device *dev = qp->happy_meals[i];
    2508  struct quattro *qp;    [local]
    2511  qp = dev_get_drvdata(&op->dev);
    2512  if (qp)
    2513  return qp;
    2515  qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
    2516  if (qp != NULL) {
    2520  qp->happy_meals[i] = NULL;
    2522  qp ...    (snippet truncated)
    2537  struct quattro *qp;    [local]
    2566  struct quattro *qp;    [local]
    2588  struct quattro *qp;    [local]
    2631  struct quattro *qp = NULL;    [local]
    2935  struct quattro *qp = NULL;    [local]
    (further matches omitted)

/drivers/infiniband/hw/mthca/

mthca_provider.c
    515  struct mthca_qp *qp;    [local]
    528  qp = kmalloc(sizeof *qp, GFP_KERNEL);
    529  if (!qp)
    536  kfree(qp);
    544  kfree(qp);
    556  kfree(qp);
    560  qp->mr.ibmr.lkey = ucmd.lkey;
    561  qp->sq.db_index = ucmd.sq_db_index;
    562  qp ...    (snippet truncated)
    627  mthca_destroy_qp(struct ib_qp *qp)    [qp: argument]
    (further matches omitted)

mthca_eq.c
    144  } __attribute__((packed)) qp;    [qp: member of an anonymous union in struct mthca_eqe]
    282  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
    287  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
    292  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
    297  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
    307  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
    312  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
    317  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
    322  mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, ...
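
Every dispatch above repeats the same decode: the event queue entry carries
the QP number as a big-endian 32-bit word in which only the low 24 bits are
the QPN, so the driver byte-swaps to host order and masks. A user-space
sketch of that decode, with ntohl standing in for be32_to_cpu:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* Pretend this was read from the EQE: big-endian in memory,
             * and the bits above bit 23 are not part of the QPN. */
            uint32_t qpn_be = htonl(0xab123456);
            uint32_t qpn = ntohl(qpn_be) & 0xffffff;

            printf("qpn = 0x%06x\n", qpn);  /* prints qpn = 0x123456 */
            return 0;
    }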

/drivers/infiniband/core/

uverbs_cmd.c
    252   static void put_qp_read(struct ib_qp *qp)    [qp: argument]
    254   put_uobj_read(qp->uobject);
    257   static void put_qp_write(struct ib_qp *qp)    [qp: argument]
    259   put_uobj_write(qp->uobject);
    1237  tmp.qp_num = wc->qp->qp_num;
    1392  struct ib_qp *qp;    [local]
    1469  qp = ib_create_qp(pd, &attr);
    1471  qp = device->create_qp(pd, &attr, &udata);
    1473  if (IS_ERR(qp)) {
    1474  ret = PTR_ERR(qp);
    1570  struct ib_qp *qp;    [local]
    1659  struct ib_qp *qp;    [local]
    1773  struct ib_qp *qp;    [local]
    1866  struct ib_qp *qp;    [local]
    1922  struct ib_qp *qp;    [local]
    2162  struct ib_qp *qp;    [local]
    2384  struct ib_qp *qp;    [local]
    2432  struct ib_qp *qp;    [local]
    (further matches omitted)
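
Lines 1469-1474 show the kernel's error-pointer idiom: on failure,
ib_create_qp() does not return NULL but a negative errno encoded in the
pointer itself, so the caller tests with IS_ERR() and decodes with
PTR_ERR(). A hedged kernel-side sketch of that call shape; the wrapper name
is hypothetical:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical wrapper showing the IS_ERR/PTR_ERR decode around
     * ib_create_qp(), as in uverbs_cmd.c above. */
    static struct ib_qp *create_qp_or_err(struct ib_pd *pd,
                                          struct ib_qp_init_attr *attr,
                                          int *err)
    {
            struct ib_qp *qp = ib_create_qp(pd, attr);

            if (IS_ERR(qp)) {
                    *err = PTR_ERR(qp);  /* recover the negative errno */
                    return NULL;
            }
            *err = 0;
            return qp;
    }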

mad.c
    282   if (!port_priv->qp_info[qpn].qp) {
    294   mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, ...
    317   mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
    484   mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
    654   static void build_smp_wc(struct ib_qp *qp, ...    [qp: argument]
    665   wc->qp = qp;
    734   build_smp_wc(mad_agent_priv->agent.qp, ...
    1180  ib_redirect_mad_qp(struct ib_qp *qp, u8 rmpp_version,
                             ib_mad_send_handler send_handler,
                             ib_mad_recv_handler recv_handler, void *context)    [qp: argument]
    2669  struct ib_qp *qp;    [local]
    (further matches omitted)

uverbs_main.c
    174  static void ib_uverbs_detach_umcast(struct ib_qp *qp, ...    [qp: argument]
    180  ib_detach_mcast(qp, &mcast->gid, mcast->lid);
    205  struct ib_qp *qp = uobj->object;    [local]
    210  if (qp != qp->real_qp) {
    211  ib_close_qp(qp);
    213  ib_uverbs_detach_umcast(qp, uqp);
    214  ib_destroy_qp(qp);
    479  uobj = container_of(event->element.qp->uobject, ...

cma.c
    444  static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)    [qp: argument]
    454  ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
    459  ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
    465  ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
    470  static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)    [qp: argument]
    480  return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
    487  struct ib_qp *qp;    [local]
    494  qp = ib_create_qp(pd, qp_init_attr);
    495  if (IS_ERR(qp))
    496  return PTR_ERR(qp);
    (further matches omitted)
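
The three ib_modify_qp() calls at lines 454-465 walk a freshly created UD
QP through the verbs state machine: RESET -> INIT with the pkey/port/qkey
mask supplied by the CM, then INIT -> RTR, then RTR -> RTS with a starting
send PSN. A hedged sketch of just that sequence; the initial attr/mask are
assumed to come from rdma_init_qp_attr()-style setup:

    #include <rdma/ib_verbs.h>

    /* Sketch of the cma_init_ud_qp() transitions above, errors trimmed. */
    static int ud_qp_to_rts(struct ib_qp *qp, struct ib_qp_attr *attr,
                            int mask)
    {
            int ret;

            attr->qp_state = IB_QPS_INIT;  /* mask: pkey index, port, qkey */
            ret = ib_modify_qp(qp, attr, mask);
            if (ret)
                    return ret;

            attr->qp_state = IB_QPS_RTR;   /* ready to receive */
            ret = ib_modify_qp(qp, attr, IB_QP_STATE);
            if (ret)
                    return ret;

            attr->qp_state = IB_QPS_RTS;   /* ready to send */
            attr->sq_psn = 0;
            return ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
    }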

/drivers/infiniband/hw/amso1100/

c2_cq.c
    82   void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)    [qp: argument]
    100  if (msg->qp_user_context == (u64) (unsigned long) qp) {
    135  struct c2_qp *qp;    [local]
    144  * if the qp returned is null then this qp has already ...
    148  while ((qp = ...    (snippet truncated)
    158  entry->qp = &qp->ibqp;
    190  c2_mq_lconsume(&qp->rq_mq, 1);
    192  c2_mq_lconsume(&qp ...    (snippet truncated)
    (further matches omitted)

c2.h
    490  struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
    491  extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
    493  extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, ...
    495  extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, ...
    517  extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);

c2_intr.c
    184  c2_set_qp_state(req->qp, ...

c2_vq.c
    113  r->qp = NULL;

/drivers/net/ethernet/ibm/ehea/

ehea_main.c
    195   arr[i++].fwh = pr->qp->fw_handle;
    419   ehea_update_rq1a(pr->qp, adder);
    441   ehea_update_rq1a(pr->qp, i - 1);
    449   struct ehea_qp *qp = pr->qp;    [local]
    491   rwqe = ehea_get_next_rwqe(qp, rq_nr);
    511   ehea_update_rq2a(pr->qp, adder);
    513   ehea_update_rq3a(pr->qp, adder);
    645   pr->qp->init_attr.qp_nr);
    660   struct ehea_qp *qp ...    [local] (snippet truncated)
    941   struct ehea_qp *qp;    [local]
    2196  ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)    [qp: argument]
    2506  struct ehea_qp qp = *orig_qp;    [local]
    2558  struct ehea_qp *qp = pr->qp;    [local]
    2609  struct ehea_qp qp = *orig_qp;    [local]
    2660  struct ehea_qp *qp = pr->qp;    [local]
    (further matches omitted)

/drivers/crypto/

n2_core.c
    233   static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
    236   qp->head != qp->tail)
    480   static unsigned long wait_for_tail(struct spu_queue *qp)    [qp: argument]
    485   hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
    490   if (head == qp->tail) {
    491   qp->head = head;
    498   static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, ...    [qp: argument]
    501   unsigned long hv_ret = spu_queue_submit(qp, ent);
    504   hv_ret = wait_for_tail(qp);
    517   struct spu_queue *qp;    [local]
    829   __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
                          struct spu_queue *qp, bool encrypt)    [qp: argument]
    981   struct spu_queue *qp;    [local]
    1033  struct spu_queue *qp;    [local]
    (further matches omitted)
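
wait_for_tail() above spins until the hardware's head pointer, read back
through the sun4v hypervisor, catches up with the software tail, i.e. until
every submitted descriptor has been consumed. A hedged sketch of that drain
loop, assuming the sparc sun4v_ncs_gethead() hypercall and the driver's
spu_queue fields shown above:

    /* Poll the device-updated head until it reaches our tail; the queue
     * is then empty and the cached head can be resynchronized. */
    static unsigned long drain_spu_queue(struct spu_queue *qp)
    {
            unsigned long head, hv_ret;

            do {
                    hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
                    if (hv_ret != HV_EOK)
                            return hv_ret;  /* hypervisor error */
            } while (head != qp->tail);

            qp->head = head;  /* fully drained */
            return HV_EOK;
    }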

/drivers/infiniband/hw/cxgb3/

iwch_cm.c
    681   ep->com.qp = NULL;
    713   ep->com.qp = NULL;
    740   ep->com.qp = NULL;
    921   err = iwch_modify_qp(ep->com.qp->rhp,
    922                        ep->com.qp, mask, &attrs, 1);
    926   if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
    1480  iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, ...
    1493  if (ep->com.cm_id && ep->com.qp) {
    1495  iwch_modify_qp(ep->com.qp ...    (snippet truncated)
    1798  struct iwch_qp *qp = get_qhp(h, conn_param->qpn);    [local]
    (further matches omitted)

iwch_provider.h
    182  void iwch_qp_add_ref(struct ib_qp *qp);
    183  void iwch_qp_rem_ref(struct ib_qp *qp);
    331  int iwch_bind_mw(struct ib_qp *qp, ...

/drivers/scsi/

qlogicpti.h
    504  #define for_each_qlogicpti(qp) \
    505      for((qp) = qptichain; (qp); (qp) = (qp)->next)
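
The macro is a conventional open-coded walk of the driver-global qptichain
singly linked list. A hypothetical caller that counts registered boards
would read:

    /* Hypothetical user of the iterator above; it relies only on the
     * macro and the global qptichain list it traverses. */
    static int count_qlogicpti_boards(void)
    {
            struct qlogicpti *qp;
            int n = 0;

            for_each_qlogicpti(qp)
                    n++;
            return n;
    }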

/drivers/infiniband/ulp/iser/

iser_verbs.c
    54   iser_err("got qp event %d\n",cause->event);
    238  ib_conn->qp = ib_conn->cma_id->qp;
    239  iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n", ...
    241  ib_conn->fmr_pool, ib_conn->cma_id->qp);
    257  iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n", ...
    259  ib_conn->fmr_pool, ib_conn->qp);
    261  /* qp is created only once both addr & route are resolved */
    265  if (ib_conn->qp != NULL)
    273  ib_conn->qp ...    (snippet truncated)
    (further matches omitted)

/drivers/infiniband/hw/cxgb4/

cm.c
    806   ep->com.qp = NULL;
    846   ep->com.qp = NULL;
    884   ep->com.qp = NULL;
    1134  err = c4iw_modify_qp(ep->com.qp->rhp,
    1135                       ep->com.qp, mask, &attrs, 1);
    1148  err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, ...
    1166  err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, ...
    1768  ret = c4iw_modify_qp(ep->com.qp ...    (snippet truncated)
    2137  struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);    [local]
    (further matches omitted)

resource.c
    114  if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size * ...
    118  for (i = rdev->lldi.vr->qp.start;
    119       i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
    197  * now put the same ids on the qp list since they all ...
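
Lines 114-119 seed a kfifo with every queue ID in the hardware-advertised
range once at start-up; allocating a QID is then a kfifo_out() and freeing
one a kfifo_in(). A hedged sketch of that pool pattern with illustrative
names; kfifo_alloc()/kfifo_in() are the <linux/kfifo.h> calls, though their
exact signatures have shifted across kernel versions:

    #include <linux/kfifo.h>
    #include <linux/slab.h>

    /* Illustrative QID pool: fill the fifo once with [start, start+count),
     * mirroring the resource.c initialization above. */
    static int qid_pool_init(struct kfifo *fifo, u32 start, u32 count)
    {
            u32 i;
            int ret = kfifo_alloc(fifo, count * sizeof(u32), GFP_KERNEL);

            if (ret)
                    return ret;

            for (i = start; i < start + count; i++)
                    kfifo_in(fifo, &i, sizeof(i));
            return 0;
    }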

/drivers/infiniband/ulp/srp/

ib_srp.h
    137  struct ib_qp *qp;    [qp: member of struct srp_target_port]