Searched defs:ibdev (Results 1 - 25 of 53) sorted by relevance


/drivers/infiniband/hw/mlx4/
ah.c 74 struct mlx4_ib_dev *ibdev = to_mdev(pd->device); local
75 struct mlx4_dev *dev = ibdev->dev;
cm.c 135 static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad) argument
144 id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id) argument
146 struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
192 static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id) argument
194 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
202 found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
210 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) argument
212 struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
218 ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
243 id_map_alloc(struct ib_device *ibdev, in argument
284 id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id) argument
301 schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) argument
317 mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id, struct ib_mad *mad) argument
358 mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, struct ib_mad *mad) argument
[all...]
alias_GUID.c 346 static int set_guid_rec(struct ib_device *ibdev, argument
351 struct mlx4_ib_dev *dev = to_mdev(ibdev);
360 err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
401 ibdev, port, &guid_info_rec,
cq.c 169 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, argument
173 struct mlx4_ib_dev *dev = to_mdev(ibdev);
229 vector = dev->eq_table[vector % ibdev->num_comp_vectors];
mr.c 387 struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, argument
390 struct mlx4_ib_dev *dev = to_mdev(ibdev);
/drivers/infiniband/hw/mlx5/
mad.c 60 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, argument
94 err = mlx5_MAD_IFC(to_mdev(ibdev),
/drivers/infiniband/hw/ehca/
ehca_hca.c 53 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) argument
56 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
182 int ehca_query_port(struct ib_device *ibdev, argument
187 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
281 int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) argument
288 shca = container_of(ibdev, struct ehca_shca, ib_device);
315 int ehca_query_gid(struct ib_device *ibdev, u8 port, argument
320 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
356 int ehca_modify_port(struct ib_device *ibdev, argument
366 shca = container_of(ibdev, struc
[all...]
ehca_sqp.c 142 static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, argument
153 container_of(ibdev, struct ehca_shca, ib_device);
156 ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
161 ehca_warn(ibdev, "Unsupported class_version=%x",
185 ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
198 ehca_query_gid(ibdev, port_num, 0,
201 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
219 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, argument
225 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
232 ehca_dbg(ibdev, "port_nu
[all...]
/drivers/infiniband/hw/ipath/
ipath_cq.c 190 * @ibdev: the device this completion queue is attached to
200 struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, argument
204 struct ipath_ibdev *dev = to_idev(ibdev);
ipath_mad.c 59 struct ib_device *ibdev)
64 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
85 struct ib_device *ibdev, u8 port)
88 struct ipath_devdata *dd = to_idev(ibdev)->dd;
103 nip->num_ports = ibdev->phys_port_cnt;
105 nip->sys_guid = to_idev(ibdev)->sys_image_guid;
123 struct ib_device *ibdev)
137 __be64 g = to_idev(ibdev)->dd->ipath_guid;
236 struct ib_device *ibdev, u8 port)
246 if (be32_to_cpu(smp->attr_mod) > ibdev
58 recv_subn_get_nodedescription(struct ib_smp *smp, struct ib_device *ibdev) argument
84 recv_subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) argument
122 recv_subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev) argument
235 recv_subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) argument
360 recv_subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev) argument
384 recv_subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev) argument
417 recv_subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) argument
781 recv_subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, u8 port) argument
834 recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) argument
883 recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) argument
970 recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp, struct ib_device *ibdev) argument
995 recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev) argument
1022 recv_pma_get_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) argument
1121 recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) argument
1158 recv_pma_set_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) argument
1221 recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) argument
1259 process_subn(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_mad *in_mad, struct ib_mad *out_mad) argument
1391 process_perf(struct ib_device *ibdev, u8 port_num, struct ib_mad *in_mad, struct ib_mad *out_mad) argument
1492 ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) argument
[all...]
/drivers/infiniband/hw/mthca/
mthca_mad.c 105 static void smp_snoop(struct ib_device *ibdev, argument
120 mthca_update_rate(to_mdev(ibdev), port_num);
121 update_sm_ah(to_mdev(ibdev), port_num,
125 event.device = ibdev;
140 event.device = ibdev;
195 int mthca_process_mad(struct ib_device *ibdev, argument
211 forward_trap(to_mdev(ibdev), port_num, in_mad);
248 !ib_query_port(ibdev, port_num, &pattr))
251 err = mthca_MAD_IFC(to_mdev(ibdev),
258 mthca_err(to_mdev(ibdev), "MAD_IF
[all...]
mthca_provider.c 60 static int mthca_query_device(struct ib_device *ibdev, argument
66 struct mthca_dev *mdev = to_mdev(ibdev);
133 static int mthca_query_port(struct ib_device *ibdev, argument
151 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
163 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
165 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
182 static int mthca_modify_device(struct ib_device *ibdev, argument
190 if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
192 memcpy(ibdev->node_desc, props->node_desc, 64);
193 mutex_unlock(&to_mdev(ibdev)
199 mthca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props) argument
228 mthca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) argument
257 mthca_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) argument
297 mthca_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) argument
371 mthca_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) argument
644 mthca_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) argument
[all...]
/drivers/infiniband/hw/ocrdma/
ocrdma_ah.c 114 status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
190 int ocrdma_process_mad(struct ib_device *ibdev, argument
/drivers/infiniband/hw/usnic/
usnic_ib.h 82 struct usnic_ib_dev *to_usdev(struct ib_device *ibdev) argument
84 return container_of(ibdev, struct usnic_ib_dev, ib_dev);
usnic_ib_verbs.c 250 int usnic_ib_query_device(struct ib_device *ibdev, argument
253 struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
306 int usnic_ib_query_port(struct ib_device *ibdev, u8 port, argument
309 struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
389 int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, argument
393 struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
408 int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, argument
418 struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, argument
438 pd, context, ibdev->name);
573 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, in argument
634 usnic_ib_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) argument
[all...]
/drivers/infiniband/hw/cxgb3/
iwch.h 105 struct ib_device ibdev; member in struct:iwch_dev
117 static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) argument
119 return container_of(ibdev, struct iwch_dev, ibdev);
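Note: the cxgb3 hits above (iwch.h) show the wrapper pattern that recurs throughout these results: the driver embeds struct ib_device inside its private device structure and recovers the container with container_of(). A minimal sketch of that pattern, using hypothetical names (my_dev, to_my_dev) rather than any driver's actual identifiers:

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    /* Driver-private device state; struct ib_device is embedded, not pointed to. */
    struct my_dev {
            struct ib_device ibdev;  /* hypothetical embedding, like iwch_dev above */
            /* ... driver-specific fields ... */
    };

    /* Recover the private struct from the ib_device handed to verbs callbacks. */
    static inline struct my_dev *to_my_dev(struct ib_device *ibdev)
    {
            return container_of(ibdev, struct my_dev, ibdev);
    }

The same idiom appears above as to_mdev(), to_idev(), to_usdev(), to_c4iw_dev() and to_c2dev() in the other drivers.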
iwch_provider.c 85 static int iwch_process_mad(struct ib_device *ibdev, argument
109 static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev, argument
113 struct iwch_dev *rhp = to_iwch_dev(ibdev);
115 PDBG("%s ibdev %p\n", __func__, ibdev);
141 static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector, argument
153 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
154 rhp = to_iwch_dev(ibdev);
415 static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, argument
423 PDBG("%s ibdev
1105 iwch_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey) argument
1113 iwch_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) argument
1148 iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props) argument
1181 iwch_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) argument
1279 iwch_get_mib(struct ib_device *ibdev, union rdma_protocol_stats *stats) argument
[all...]
/drivers/infiniband/hw/qib/
qib_cq.c 205 * @ibdev: the device this completion queue is attached to
215 struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, argument
219 struct qib_ibdev *dev = to_idev(ibdev);
qib_mr.c 342 qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) argument
qib_sysfs.c 505 container_of(device, struct qib_ibdev, ibdev.dev);
514 container_of(device, struct qib_ibdev, ibdev.dev);
536 container_of(device, struct qib_ibdev, ibdev.dev);
548 container_of(device, struct qib_ibdev, ibdev.dev);
560 container_of(device, struct qib_ibdev, ibdev.dev);
575 container_of(device, struct qib_ibdev, ibdev.dev);
586 container_of(device, struct qib_ibdev, ibdev.dev);
600 container_of(device, struct qib_ibdev, ibdev.dev);
618 container_of(device, struct qib_ibdev, ibdev.dev);
643 container_of(device, struct qib_ibdev, ibdev
702 qib_create_port_files(struct ib_device *ibdev, u8 port_num, struct kobject *kobj) argument
[all...]
/drivers/infiniband/hw/cxgb4/
cq.c 862 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, argument
874 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
876 rhp = to_c4iw_dev(ibdev);
provider.c 82 static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, argument
104 static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, argument
108 struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
114 PDBG("%s ibdev %p\n", __func__, ibdev);
247 static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev, argument
255 PDBG("%s ibdev %p\n", __func__, ibdev);
256 rhp = (struct c4iw_dev *) ibdev;
282 static int c4iw_query_pkey(struct ib_device *ibdev, u argument
290 c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) argument
304 c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props) argument
339 c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) argument
440 c4iw_get_mib(struct ib_device *ibdev, union rdma_protocol_stats *stats) argument
[all...]
/drivers/infiniband/ulp/iser/
iser_memory.c 176 struct ib_device *ibdev, u64 *pages,
191 start_addr = ib_sg_dma_address(ibdev, sg);
194 dma_len = ib_sg_dma_len(ibdev, sg);
229 struct ib_device *ibdev)
239 start_addr = ib_sg_dma_address(ibdev, sgl);
249 end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
250 start_addr = ib_sg_dma_address(ibdev, next_sg);
268 struct ib_device *ibdev)
277 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
279 sg->length, ib_sg_dma_len(ibdev, s
175 iser_sg_to_page_vec(struct iser_data_buf *data, struct ib_device *ibdev, u64 *pages, int *offset, int *data_size) argument
228 iser_data_buf_aligned_len(struct iser_data_buf *data, struct ib_device *ibdev) argument
267 iser_data_buf_dump(struct iser_data_buf *data, struct ib_device *ibdev) argument
292 iser_page_vec_build(struct iser_data_buf *data, struct iser_page_vec *page_vec, struct ib_device *ibdev) argument
344 fall_to_bounce_buf(struct iscsi_iser_task *iser_task, struct ib_device *ibdev, struct iser_data_buf *mem, struct iser_data_buf *mem_copy, enum iser_data_dir cmd_dir, int aligned_len) argument
382 struct ib_device *ibdev = device->ib_device; local
611 struct ib_device *ibdev = device->ib_device; local
702 struct ib_device *ibdev = device->ib_device; local
[all...]
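The iser_memory.c hits walk a DMA-mapped scatterlist through the ib_sg_dma_address() and ib_sg_dma_len() accessors instead of reading the sg entries directly. A rough sketch of that access pattern, assuming an already-mapped list (the helper name dump_mapped_sg and its nents parameter are illustrative, not iser code):

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    /* Print the DMA address/length of each mapped scatterlist entry. */
    static void dump_mapped_sg(struct ib_device *ibdev,
                               struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, nents, i)
                    pr_debug("sg[%d]: dma_addr 0x%llx dma_len %u\n", i,
                             (unsigned long long)ib_sg_dma_address(ibdev, sg),
                             ib_sg_dma_len(ibdev, sg));
    }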
/drivers/infiniband/core/
sysfs.c 45 struct ib_device *ibdev; member in struct:ib_port
102 ret = ib_query_port(p->ibdev, p->port_num, &attr);
117 ret = ib_query_port(p->ibdev, p->port_num, &attr);
131 ret = ib_query_port(p->ibdev, p->port_num, &attr);
144 ret = ib_query_port(p->ibdev, p->port_num, &attr);
157 ret = ib_query_port(p->ibdev, p->port_num, &attr);
170 ret = ib_query_port(p->ibdev, p->port_num, &attr);
185 ret = ib_query_port(p->ibdev, p->port_num, &attr);
232 ret = ib_query_port(p->ibdev, p->port_num, &attr);
251 switch (rdma_port_get_link_layer(p->ibdev,
[all...]
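The core/sysfs.c hits repeat one shape: each port attribute handler calls ib_query_port() on the stored ibdev/port_num pair and formats a single field of the returned struct ib_port_attr. A hedged sketch of that call pattern (show_port_lid is a hypothetical helper, not the kernel's handler):

    #include <rdma/ib_verbs.h>

    /* Query one port and return its LID; negative errno on failure. */
    static int show_port_lid(struct ib_device *ibdev, u8 port_num, u16 *lid)
    {
            struct ib_port_attr attr;
            int ret;

            ret = ib_query_port(ibdev, port_num, &attr);
            if (ret)
                    return ret;

            *lid = attr.lid;
            return 0;
    }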
/drivers/infiniband/hw/amso1100/
c2_provider.c 66 static int c2_query_device(struct ib_device *ibdev, argument
69 struct c2_dev *c2dev = to_c2dev(ibdev);
77 static int c2_query_port(struct ib_device *ibdev, argument
102 static int c2_query_pkey(struct ib_device *ibdev, argument
110 static int c2_query_gid(struct ib_device *ibdev, u8 port, argument
113 struct c2_dev *c2dev = to_c2dev(ibdev);
125 static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev, argument
151 static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev, argument
164 err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
172 c2_pd_free(to_c2dev(ibdev), p
289 c2_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *context, struct ib_udata *udata) argument
582 c2_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) argument
[all...]

