/drivers/infiniband/hw/amso1100/
c2_mm.c
      56  struct c2wr_nsmr_pbl_req *wr;  /* PBL WR ptr */  (local)
      74  wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
      75  if (!wr) {
      78  c2_wr_set_id(wr, CCWR_NSMR_PBL);
      85  wr->hdr.context = 0;
      86  wr->rnic_handle = c2dev->adapter_handle;
      87  wr->stag_index = stag_index;  /* already swapped */
      88  wr->flags = 0;
      92  wr->addrs_length = cpu_to_be32(count);
     105  wr…
     180  struct c2wr_nsmr_register_req *wr;  (local)
     319  struct c2wr_stag_dealloc_req wr;  /* work request */  (local)
     …
c2_cm.c
      46  struct c2wr_qp_connect_req *wr;  /* variable size needs a malloc. */  (local)
      81  wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
      82  if (!wr) {
      93  c2_wr_set_id(wr, CCWR_QP_CONNECT);
      94  wr->hdr.context = 0;
      95  wr->rnic_handle = c2dev->adapter_handle;
      96  wr->qp_handle = qp->adapter_handle;
      98  wr->remote_addr = raddr->sin_addr.s_addr;
      99  wr->remote_port = raddr->sin_port;
     106  wr…
     138  struct c2wr_ep_listen_create_req wr;  (local)
     228  struct c2wr_ep_listen_destroy_req wr;  (local)
     296  struct c2wr_cr_accept_req *wr;  /* variable length WR */  (local)
     400  struct c2wr_cr_reject_req wr;  (local)
     …
c2_rnic.c
      83  struct c2wr_init_req wr;  (local)
      86  memset(&wr, 0, sizeof(wr));
      87  c2_wr_set_id(&wr, CCWR_INIT);
      88  wr.hdr.context = 0;
      89  wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
      90  wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
      91  wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
      92  wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
      93  wr…
     107  struct c2wr_init_req wr;  (local)
     126  struct c2wr_rnic_query_req wr;  (local)
     215  struct c2wr_rnic_setconfig_req *wr;  (local)
     277  struct c2wr_rnic_setconfig_req *wr;  (local)
     340  union c2wr wr;  (local)
     394  union c2wr wr;  (local)
     …
c2_qp.c
     137  struct c2wr_qp_modify_req wr;  (local)
     154  c2_wr_set_id(&wr, CCWR_QP_MODIFY);
     155  wr.hdr.context = (unsigned long) vq_req;
     156  wr.rnic_handle = c2dev->adapter_handle;
     157  wr.qp_handle = qp->adapter_handle;
     158  wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
     159  wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
     160  wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
     161  wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
     170  wr…
     256  struct c2wr_qp_modify_req wr;  (local)
     305  struct c2wr_qp_destroy_req wr;  (local)
     419  struct c2wr_qp_create_req wr;  (local)
     762  qp_wr_post(struct c2_mq *q, union c2wr *wr, struct c2_qp *qp, u32 size)  (argument)
     795  union c2wr wr;  (local)
     949  union c2wr wr;  (local)
     …
c2_ae.c
     150  union c2wr *wr;  (local)
     164  wr = c2_mq_consume(mq);
     165  if (!wr)
     171  event_id = c2_wr_get_id(wr);
     172  resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
     174  (void *) (unsigned long) wr->ae.ae_generic.user_context;
     176  status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
     200  (unsigned long long) wr->ae.ae_generic.user_context,
     201  be32_to_cpu(wr->ae.ae_generic.resource_type),
     202  be32_to_cpu(wr…
     …
c2_cq.c
     292  struct c2wr_cq_create_req wr;  (local)
     320  memset(&wr, 0, sizeof(wr));
     321  c2_wr_set_id(&wr, CCWR_CQ_CREATE);
     322  wr.hdr.context = (unsigned long) vq_req;
     323  wr.rnic_handle = c2dev->adapter_handle;
     324  wr.msg_size = cpu_to_be32(cq->mq.msg_size);
     325  wr.depth = cpu_to_be32(cq->mq.q_size);
     326  wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
     327  wr…
     393  struct c2wr_cq_destroy_req wr;  (local)
     …
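Taken together, the c2_* snippets show one request-building pattern: zero a c2wr_* structure, stamp it with its CCWR_* opcode via c2_wr_set_id(), record a wakeup context in hdr.context, byte-swap every multi-byte field to the adapter's big-endian layout, and queue the result. A minimal sketch of that pattern, assuming the driver's vq_send_wr() helper queues a request the way its call sites suggest (the function name c2_send_init_sketch is hypothetical, not driver code):

static int c2_send_init_sketch(struct c2_dev *c2dev)
{
        struct c2wr_init_req wr;

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_INIT);           /* opcode for the adapter */
        wr.hdr.context = 0;                     /* no vq_req waiting on a reply */
        wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
        wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
        wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
        wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);

        /* the adapter parses fields as big-endian, hence cpu_to_be64() */
        return vq_send_wr(c2dev, (union c2wr *) &wr);
}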
/drivers/media/dvb-frontends/
dib3000mb.c
     152  wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
     158  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
     162  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
     175  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
     179  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);
     183  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);
     187  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);
     200  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
     207  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);
     217  wr(DIB3000MB_REG_QA…
     …
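Unlike the InfiniBand entries, wr here is not a work request: dib3000mb.c uses wr() as a register-write shorthand, and each call above programs one tuning parameter (FFT size, guard interval, DDS inversion) into the demodulator. A plausible sketch of such a macro; the real one lives in the driver's private header, so the exact body and error value below are assumptions:

/* write one register over I2C, bailing out of the caller on failure */
#define wr(reg, val)                                        \
        do {                                                \
                if (dib3000_write_reg(state, reg, val))     \
                        return -EREMOTEIO;                  \
        } while (0)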
/drivers/infiniband/hw/cxgb3/
iwch_qp.c
      42  static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,  (argument)
      48  switch (wr->opcode) {
      50  if (wr->send_flags & IB_SEND_SOLICITED)
      57  if (wr->send_flags & IB_SEND_SOLICITED)
      61  wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
      66  if (wr->num_sge > T3_MAX_SGE)
      72  for (i = 0; i < wr->num_sge; i++) {
      73  if ((plen + wr->sg_list[i].length) < plen)
      76  plen += wr->sg_list[i].length;
      77  wqe->send.sgl[i].stag = cpu_to_be32(wr…
      87  build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt)  (argument)
     128  build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt)  (argument)
     149  build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)  (argument)
     191  build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt)  (argument)
     247  build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr)  (argument)
     287  build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr)  (argument)
     350  iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr)  (argument)
     465  iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
     …
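The test at iwch_qp.c line 73, "if ((plen + wr->sg_list[i].length) < plen)", is an unsigned-overflow guard: plen is a 32-bit running total, and the sum can only come out smaller than plen if it wrapped around. The same idiom in isolation, as a hypothetical helper rather than driver code:

/* sum the SGE lengths of a work request, rejecting u32 wraparound */
static int sum_sge_lengths(const struct ib_sge *sg_list, int num_sge,
                           u32 *total)
{
        u32 plen = 0;
        int i;

        for (i = 0; i < num_sge; i++) {
                if (plen + sg_list[i].length < plen)
                        return -EINVAL;         /* 32-bit sum overflowed */
                plen += sg_list[i].length;
        }
        *total = plen;
        return 0;
}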
/drivers/infiniband/hw/qib/
qib_keys.c
     337  int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)  (argument)
     342  u32 rkey = wr->wr.fast_reg.rkey;
     359  if (wr->wr.fast_reg.page_list_len > mr->max_segs)
     362  ps = 1UL << wr->wr.fast_reg.page_shift;
     363  if (wr->wr.fast_reg.length > ps * wr…
     …
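qib_fast_reg_mr() validates a fast-register WR against the MR's limits: page_list_len against max_segs, and the mapped length against page_list_len pages of (1UL << page_shift) bytes. The posting side fills the same wr.fast_reg union the snippet reads; a hedged consumer-side sketch, where io_addr, page_list, npages and mr are illustrative caller state, not qib code:

static void build_fast_reg_wr(struct ib_send_wr *fr_wr, struct ib_mr *mr,
                              struct ib_fast_reg_page_list *page_list,
                              u64 io_addr, u32 npages)
{
        memset(fr_wr, 0, sizeof(*fr_wr));
        fr_wr->opcode = IB_WR_FAST_REG_MR;
        fr_wr->wr.fast_reg.iova_start = io_addr;
        fr_wr->wr.fast_reg.page_list = page_list;   /* from ib_alloc_fast_reg_page_list() */
        fr_wr->wr.fast_reg.page_list_len = npages;
        fr_wr->wr.fast_reg.page_shift = PAGE_SHIFT;
        fr_wr->wr.fast_reg.length = npages << PAGE_SHIFT;
        fr_wr->wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
        fr_wr->wr.fast_reg.rkey = mr->rkey;
}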
qib_ud.c
      62  qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
      79  ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
     109  qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
     110  sqp->qkey : swqe->wr.wr.ud.remote_qkey;
     133  if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
     135  wc.ex.imm_data = swqe->wr…
     …
qib_srq.c
      43  * @wr: the list of work requests to post
      48  int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,  (argument)
      56  for (; wr; wr = wr->next) {
      61  if ((unsigned) wr->num_sge > srq->rq.max_sge) {
      62  *bad_wr = wr;
      74  *bad_wr = wr;
      80  wqe->wr_id = wr->wr_id;
      81  wqe->num_sge = wr…
      …
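qib_post_srq_receive() follows the posting contract every verbs provider implements: walk the caller's singly linked WR chain, validate each entry against the queue limits, and on the first failure point *bad_wr at the offending request so the caller knows which WRs were never posted. A stripped-down sketch of the loop shape (qib's real version also takes rq.lock and checks for a full ring):

static int post_srq_sketch(struct qib_srq *srq, struct ib_recv_wr *wr,
                           struct ib_recv_wr **bad_wr)
{
        for (; wr; wr = wr->next) {
                if ((unsigned) wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;   /* first WR left unposted */
                        return -EINVAL;
                }
                /* copy wr->wr_id, wr->num_sge and wr->sg_list into the
                 * next free ring entry, then advance the head (elided) */
        }
        return 0;
}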
qib_ruc.c
     429  sqp->s_sge.num_sge = wqe->wr.num_sge;
     431  switch (wqe->wr.opcode) {
     434  wc.ex.imm_data = wqe->wr.ex.imm_data;
     448  wc.ex.imm_data = wqe->wr.ex.imm_data;
     461  wqe->wr.wr.rdma.remote_addr,
     462  wqe->wr.wr.rdma.rkey,
     474  wqe->wr.wr…
     …
/drivers/infiniband/hw/mthca/
mthca_qp.c
    1479  int ind, struct ib_send_wr *wr,
    1488  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
    1491  err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
    1502  switch (wr->opcode) {
    1510  sqp->ud_header.immediate_data = wr->ex.imm_data;
    1519  sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
    1525  wr->wr…
    1478  build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, int ind, struct ib_send_wr *wr, struct mthca_mlx_seg *mlx, struct mthca_data_seg *data)  (argument)
    1571  set_atomic_seg(struct mthca_atomic_seg *aseg, struct ib_send_wr *wr)  (argument)
    1584  set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, struct ib_send_wr *wr)  (argument)
    1594  set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, struct ib_send_wr *wr)  (argument)
    1602  mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr)  (argument)
    1805  mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
    1916  mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr)  (argument)
    2156  mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
    …
mthca_srq.c
     475  int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,  (argument)
     494  for (nreq = 0; wr; wr = wr->next) {
     502  *bad_wr = wr;
     514  if (unlikely(wr->num_sge > srq->max_gs)) {
     516  *bad_wr = wr;
     521  for (i = 0; i < wr->num_sge; ++i) {
     522  mthca_set_data_seg(wqe, wr->sg_list + i);
     532  srq->wrid[ind] = wr…
     575  mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
     …
/drivers/infiniband/hw/mlx5/
qp.c
    1765  struct ib_send_wr *wr)
    1767  memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
    1768  dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
    1769  dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
    1826  struct ib_send_wr *wr, int li)
    1837  umr->klm_octowords = get_klm_octo(wr->wr…
    1764  set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, struct ib_send_wr *wr)  (argument)
    1825  set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, struct ib_send_wr *wr, int li)  (argument)
    1841  set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, struct ib_send_wr *wr)  (argument)
    1883  set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, int li, int *writ)  (argument)
    1903  set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)  (argument)
    1920  set_frwr_pages(struct mlx5_wqe_data_seg *dseg, struct ib_send_wr *wr, struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, int writ)  (argument)
    1938  send_ieth(struct ib_send_wr *wr)  (argument)
    1970  set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, void *wqe, int *sz)  (argument)
    2116  set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size)  (argument)
    2217  set_sig_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, u32 nelements, u32 length, u32 pdn)  (argument)
    2237  set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, struct ib_send_wr *wr, u32 nelements)  (argument)
    2249  set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size)  (argument)
    2325  set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)  (argument)
    2395  get_fence(u8 fence, struct ib_send_wr *wr)  (argument)
    2412  begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, struct ib_send_wr *wr, int *idx, int *size, int nreq)  (argument)
    2465  mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr)  (argument)
    2768  mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
    …
mr.c
     671  static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,  (argument)
     683  wr->next = NULL;
     684  wr->send_flags = 0;
     685  wr->sg_list = sg;
     687  wr->num_sge = 1;
     689  wr->num_sge = 0;
     691  wr->opcode = MLX5_IB_WR_UMR;
     692  wr->wr.fast_reg.page_list_len = n;
     693  wr…
     701  prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, struct ib_send_wr *wr, u32 key)  (argument)
     739  struct ib_send_wr wr, *bad;  (local)
     942  struct ib_send_wr wr, *bad;  (local)
     …
/drivers/infiniband/hw/mlx4/
qp.c
    1982  struct ib_send_wr *wr,
    1989  struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
    1997  if (wr->opcode != IB_WR_SEND)
    2002  for (i = 0; i < wr->num_sge; ++i)
    2003  send_size += wr->sg_list[i].length;
    2028  sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
    2032  sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
    2104  static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,  (argument)
    1981  build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)  (argument)
    2351  set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)  (argument)
    2372  set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)  (argument)
    2404  set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)  (argument)
    2419  set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, struct ib_send_wr *wr)  (argument)
    2428  set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, struct ib_send_wr *wr)  (argument)
    2438  set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, struct mlx4_wqe_datagram_seg *dseg, struct ib_send_wr *wr, enum mlx4_ib_qp_type qpt)  (argument)
    2462  build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)  (argument)
    2545  build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)  (argument)
    2566  send_ieth(struct ib_send_wr *wr)  (argument)
    2588  mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr)  (argument)
    2895  mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
    …
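The mlx4 and mlx5 datagram builders above read the UD destination from wr->wr.ud.*, i.e. the old ib_send_wr layout in which transport-specific fields sit in an embedded union also named wr (later kernels split this into ib_ud_wr and friends). From the consumer side, a UD send against that layout looks roughly like this; the sge is assumed to describe an already DMA-mapped buffer:

static int post_ud_send(struct ib_qp *qp, struct ib_ah *ah,
                        struct ib_sge *sge, u32 remote_qpn, u32 remote_qkey)
{
        struct ib_send_wr wr, *bad_wr;

        memset(&wr, 0, sizeof(wr));
        wr.opcode = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;       /* ask for a completion */
        wr.sg_list = sge;
        wr.num_sge = 1;
        wr.wr.ud.ah = ah;                       /* address handle for the peer */
        wr.wr.ud.remote_qpn = remote_qpn;
        wr.wr.ud.remote_qkey = remote_qkey;

        return ib_post_send(qp, &wr, &bad_wr);
}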
/drivers/infiniband/hw/cxgb4/
qp.c
     365  struct ib_send_wr *wr, int max, u32 *plenp)
     373  for (i = 0; i < wr->num_sge; i++) {
     374  if ((plen + wr->sg_list[i].length) > max)
     376  srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
     377  plen += wr->sg_list[i].length;
     378  rem = wr->sg_list[i].length;
     435  struct ib_send_wr *wr, u8 *len16)
     441  if (wr->num_sge > T4_MAX_SEND_SGE)
     443  switch (wr->opcode) {
     445  if (wr…
     364  build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, struct ib_send_wr *wr, int max, u32 *plenp)  (argument)
     434  build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)  (argument)
     501  build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)  (argument)
     544  build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)  (argument)
     573  build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ib_recv_wr *wr, u8 *len16)  (argument)
     588  build_fastreg(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16, u8 t5dev)  (argument)
     660  build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)  (argument)
     724  c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr)  (argument)
     853  c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
     …
/drivers/infiniband/hw/ipath/
ipath_ruc.c
     330  sqp->s_sge.num_sge = wqe->wr.num_sge;
     332  switch (wqe->wr.opcode) {
     335  wc.ex.imm_data = wqe->wr.ex.imm_data;
     346  wc.ex.imm_data = wqe->wr.ex.imm_data;
     356  wqe->wr.wr.rdma.remote_addr,
     357  wqe->wr.wr.rdma.rkey,
     366  wqe->wr.wr…
     …
ipath_ud.c
      68  qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
      80  ((int) swqe->wr.wr.ud.remote_qkey < 0 ?
      81  sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
      96  if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
      98  wc.ex.imm_data = swqe->wr.ex.imm_data;
     178  ah_attr = &to_iah(swqe->wr.wr…
     …
ipath_rc.c
      50  ss->num_sge = wqe->wr.num_sge;
     294  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
     310  switch (wqe->wr.opcode) {
     326  if (wqe->wr.opcode == IB_WR_SEND)
     331  ohdr->u.imm_data = wqe->wr.ex.imm_data;
     334  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
     353  cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
     355  cpu_to_be32(wqe->wr.wr…
     …
ipath_srq.c
      43  * @wr: the list of work requests to post
      48  int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,  (argument)
      56  for (; wr; wr = wr->next) {
      61  if ((unsigned) wr->num_sge > srq->rq.max_sge) {
      62  *bad_wr = wr;
      74  *bad_wr = wr;
      80  wqe->wr_id = wr->wr_id;
      81  wqe->num_sge = wr…
      …
ipath_uc.c
     100  qp->s_sge.num_sge = wqe->wr.num_sge;
     102  switch (wqe->wr.opcode) {
     110  if (wqe->wr.opcode == IB_WR_SEND)
     116  ohdr->u.imm_data = wqe->wr.ex.imm_data;
     119  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
     129  cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
     131  cpu_to_be32(wqe->wr.wr.rdma.rkey);
     139  if (wqe->wr…
     …
/drivers/infiniband/ulp/isert/
ib_isert.c
      49  struct isert_rdma_wr *wr);
      54  struct isert_rdma_wr *wr);
    1561  struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;  (local)
    1565  if (wr->data.sg) {
    1567  isert_unmap_data_buf(isert_conn, &wr->data);
    1570  if (wr->send_wr) {
    1572  kfree(wr->send_wr);
    1573  wr->send_wr = NULL;
    1576  if (wr->ib_sge) {
    1578  kfree(wr…
    1586  struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;  (local)
    1760  struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;  (local)
    1786  struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;  (local)
    1868  struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;  (local)
    1903  struct isert_rdma_wr *wr;  (local)
    1962  struct isert_rdma_wr *wr;  (local)
    1998  struct isert_rdma_wr *wr;  (local)
    2415  isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr)  (argument)
    2548  struct ib_send_wr *bad_wr, *wr = NULL;  (local)
    2690  struct ib_send_wr *bad_wr, *wr = NULL;  (local)
    2756  isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr)  (argument)
    2864  struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;  (local)
    2917  struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;  (local)
    …
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
    1820  struct ib_send_wr *wr)
    1824  struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
    1826  ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
    1830  ud_hdr->qkey = wr->wr.ud.remote_qkey;
    1864  struct ib_send_wr *wr, u32 wqe_size)
    1869  if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
    1870  hdr->total_len = ocrdma_sglist_len(wr…
    1818  ocrdma_build_ud_hdr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr)  (argument)
    1861  ocrdma_build_inline_sges(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ocrdma_sge *sge, struct ib_send_wr *wr, u32 wqe_size)  (argument)
    1901  ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr)  (argument)
    1920  ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr)  (argument)
    1938  ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr)  (argument)
    1957  build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl, struct ocrdma_hw_mr *hwmr)  (argument)
    2002  ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, struct ib_send_wr *wr)  (argument)
    2052  ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr)  (argument)
    2164  ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, u16 tag)  (argument)
    2185  ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
    2257  ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr)  (argument)
    …
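ocrdma_build_rqe() and the posting entry points at the end mirror the send side: a receive WR only names buffers (wr_id, sg_list, num_sge), and ocrdma additionally stamps a tag into the RQE so completions can be matched back to ring slots. For comparison, a generic consumer-side receive post against the same era of the verbs API; dma_addr and lkey are assumed to come from an earlier DMA mapping and MR registration:

static int post_one_recv(struct ib_qp *qp, u64 dma_addr, u32 len,
                         u32 lkey, u64 wr_id)
{
        struct ib_sge sge = {
                .addr   = dma_addr,     /* buffer already DMA-mapped */
                .length = len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = wr_id,       /* echoed back in the CQE */
                .sg_list = &sge,
                .num_sge = 1,
        };
        struct ib_recv_wr *bad_wr;

        return ib_post_recv(qp, &wr, &bad_wr);
}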