Searched refs:ib_dev (Results 1 - 25 of 37) sorted by path

/drivers/infiniband/core/
ucm.c
63 struct ib_device *ib_dev; member in struct:ib_ucm_device
491 ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
1224 return sprintf(buf, "%s\n", ucm_dev->ib_dev->name);
1264 ucm_dev->ib_dev = device;
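
The ucm.c hits show the standard core-client pattern: a per-device wrapper
caches the struct ib_device pointer that the IB core passes to the client's
add callback. A minimal sketch, assuming the add/remove client API of the
era this listing reflects; all my_* names are hypothetical:

    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    struct my_ucm_device {
        struct ib_device *ib_dev;   /* cached core device, as in ib_ucm_device */
    };

    static void my_add_one(struct ib_device *device);
    static void my_remove_one(struct ib_device *device);

    static struct ib_client my_client = {
        .name   = "my_ucm",
        .add    = my_add_one,
        .remove = my_remove_one,
    };

    static void my_add_one(struct ib_device *device)
    {
        struct my_ucm_device *udev = kzalloc(sizeof(*udev), GFP_KERNEL);

        if (!udev)
            return;
        udev->ib_dev = device;      /* mirrors "ucm_dev->ib_dev = device" above */
        ib_set_client_data(device, &my_client, udev);
    }

    static void my_remove_one(struct ib_device *device)
    {
        kfree(ib_get_client_data(device, &my_client));
    }
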
user_mad.c
95 struct ib_device *ib_dev; member in struct:ib_umad_port
628 if (!file->port->ib_dev) {
677 agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
730 if (!file->port->ib_dev) {
793 agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
932 if (!port->ib_dev)
1038 ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
1054 ib_modify_port(port->ib_dev, port->port_num, 0, &props);
1072 if (port->ib_dev)
1073 ret = ib_modify_port(port->ib_dev, por
[all...]
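
The repeated "if (!file->port->ib_dev)" tests in user_mad.c guard against
hot-unplug: ib_dev is cleared when the HCA is removed while the character
device is still open, so every operation revalidates it under a lock. A
sketch of that guard, with hypothetical my_* names and a stubbed send helper:

    #include <linux/mutex.h>
    #include <rdma/ib_verbs.h>

    struct my_umad_port {
        struct ib_device *ib_dev;   /* NULL once the device is unplugged */
        u8 port_num;
        struct mutex file_mutex;
    };

    static int do_send_mad(struct ib_device *dev, u8 port) { return 0; } /* stub */

    static int my_umad_send(struct my_umad_port *port)
    {
        int ret;

        mutex_lock(&port->file_mutex);
        if (!port->ib_dev) {
            ret = -EPIPE;           /* device went away under us */
            goto out;
        }
        ret = do_send_mad(port->ib_dev, port->port_num);
    out:
        mutex_unlock(&port->file_mutex);
        return ret;
    }
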
uverbs.h
92 struct ib_device *ib_dev; member in struct:ib_uverbs_device
uverbs_cmd.c
290 struct ib_device *ibdev = file->device->ib_dev;
351 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
396 ret = ib_query_device(file->device->ib_dev, &attr);
403 resp.node_guid = file->device->ib_dev->node_guid;
441 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
465 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
490 resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
528 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
535 pd->device = file->device->ib_dev;
[all...]
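
The alloc_pd hits at 528-535 show uverbs dispatching a verb through the
function pointer embedded in struct ib_device and then filling in the
generic fields itself. A sketch of that dispatch; the trailing alloc_pd
parameters are an assumption based on the pre-ib_device_ops verb table
this listing implies:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static struct ib_pd *my_alloc_pd(struct ib_device *ibdev,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
    {
        /* call into the driver through its verb pointer, as uverbs does */
        struct ib_pd *pd = ibdev->alloc_pd(ibdev, context, udata);

        if (!IS_ERR(pd))
            pd->device = ibdev;     /* mirrors "pd->device = file->device->ib_dev" */
        return pd;
    }
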
uverbs_main.c
307 module_put(file->device->ib_dev->owner);
629 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << command)))
662 if (!(file->device->ib_dev->uverbs_ex_cmd_mask & (1ull << command)))
722 return file->device->ib_dev->mmap(file->ucontext, vma);
747 if (!try_module_get(dev->ib_dev->owner)) {
769 module_put(dev->ib_dev->owner);
821 return sprintf(buf, "%s\n", dev->ib_dev->name);
833 return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver);
905 uverbs_dev->ib_dev = device;
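
uverbs_main.c uses ib_dev for two guards: a 64-bit per-device bitmask that
gates which commands the driver implements, and a module reference that pins
the driver while userspace holds the file open. A condensed sketch with
hypothetical my_* wrappers around the real fields quoted above:

    #include <linux/module.h>
    #include <rdma/ib_verbs.h>

    static int my_check_command(struct ib_device *ibdev, u32 command)
    {
        /* same test as "uverbs_cmd_mask & (1ull << command)" above */
        if (!(ibdev->uverbs_cmd_mask & (1ull << command)))
            return -ENOSYS;
        return 0;
    }

    static int my_open_device(struct ib_device *ibdev)
    {
        if (!try_module_get(ibdev->owner))  /* pin the driver module */
            return -ENODEV;
        return 0;                           /* paired with module_put() on release */
    }
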
/drivers/infiniband/hw/ehca/
ehca_eq.c
62 struct ib_device *ib_dev = &shca->ib_device; local
69 ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
73 ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
86 ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
93 ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
131 ehca_err(ib_dev, "Can't map interrupt handler.");
139 ehca_err(ib_dev, "Can't map interrupt handler.");
ehca_qp.c
291 struct ib_device *ib_dev = &shca->ib_device; local
310 ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
319 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
333 ehca_err(ib_dev, "hipz_qp_register_rpage() "
340 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
347 ehca_err(ib_dev, "hipz_qp_register_rpage() "
ehca_tools.h
69 #define ehca_dbg(ib_dev, format, arg...) \
72 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
78 #define ehca_info(ib_dev, format, arg...) \
79 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
82 #define ehca_warn(ib_dev, format, arg...) \
83 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
86 #define ehca_err(ib_dev, format, arg...) \
87 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
90 /* use this one only if no ib_dev available */
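
All the ehca_* macros route through the ib_device's dma_device so that
messages carry the underlying bus device name; only the severity differs.
A reconstruction of the error variant, faithful to the fragments quoted
above (the argument list past the fragments is filled in from that era's
source and should be treated as illustrative):

    #include <linux/device.h>
    #include <linux/smp.h>
    #include <rdma/ib_verbs.h>

    #define my_ehca_err(ib_dev, format, arg...)                              \
            dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
                    raw_smp_processor_id(), __func__, ## arg)

    /* usage, as in the ehca_eq.c hits above:
     *     my_ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
     */
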
/drivers/infiniband/hw/mlx4/
alias_GUID.c
249 mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
272 mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
548 set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
627 if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
cm.c
181 found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
mad.c
420 return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
430 ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
489 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
532 ib_dma_sync_single_for_cpu(&dev->ib_dev,
574 ib_dma_sync_single_for_device(&dev->ib_dev,
888 ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
891 agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
979 mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
1001 mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
1022 struct mlx4_ib_dev *dev = ew->ib_dev;
[all...]
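
The mad.c hits at 532 and 574 bracket CPU access to a DMA-mapped receive
buffer with the core's sync helpers: hand ownership to the CPU, copy the
MAD out, hand it back to the device. A sketch with a hypothetical buffer
layout; the ib_dma_* helpers are the real core API:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>
    #include <rdma/ib_mad.h>
    #include <rdma/ib_verbs.h>

    struct my_rx_buf {
        struct ib_mad mad;          /* DMA-mapped receive area */
        struct ib_mad copy;         /* CPU-side copy */
        u64 map;                    /* from ib_dma_map_single() */
    };

    static void my_copy_mad(struct ib_device *ibdev, struct my_rx_buf *buf)
    {
        ib_dma_sync_single_for_cpu(ibdev, buf->map, sizeof(buf->mad),
                                   DMA_FROM_DEVICE);
        memcpy(&buf->copy, &buf->mad, sizeof(buf->mad));
        ib_dma_sync_single_for_device(ibdev, buf->map, sizeof(buf->mad),
                                      DMA_FROM_DEVICE);
    }
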
main.c
1356 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1365 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1377 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1385 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1395 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1403 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1488 if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
1803 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
2016 ibdev->ib_dev.num_comp_vectors = total_eqs;
2028 ibdev->ib_dev
[all...]
mcg.c
237 if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
mlx4_ib.h
405 struct ib_device *ib_dev; member in struct:mlx4_ib_demux_pv_ctx
415 struct ib_device *ib_dev; member in struct:mlx4_ib_demux_ctx
498 struct ib_device ib_dev; member in struct:mlx4_ib_dev
534 struct mlx4_ib_dev *ib_dev; member in struct:ib_event_work
547 return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
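
mlx4_ib.h shows both ways ib_dev appears in driver structs: as a cached
pointer in the demux contexts, and embedded by value in the top-level
device, where container_of() recovers the private struct exactly as the
helper at 547 does. A generic sketch with hypothetical my_* names:

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    struct my_dev {
        struct ib_device ib_dev;    /* embedded by value, not a pointer */
        /* driver-private fields follow */
    };

    /* same shape as "container_of(ibdev, struct mlx4_ib_dev, ib_dev)" above */
    static inline struct my_dev *to_my_dev(struct ib_device *ibdev)
    {
        return container_of(ibdev, struct my_dev, ib_dev);
    }
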
qp.c
1026 free_proxy_bufs(&dev->ib_dev, qp);
1246 int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
1431 rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
1662 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
1692 &dev->ib_dev, qp->port) ==
1900 ll = rdma_port_get_link_layer(&dev->ib_dev, port);
1923 (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
1986 struct ib_device *ib_dev = &mdev->ib_dev; local
2029 ib_get_cached_pkey(ib_dev, sq
2107 struct ib_device *ib_dev = sqp->qp.ibqp.device; local
[all...]
sysfs.c
142 ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num,
169 ret = __mlx4_ib_query_pkey(&mdev->ib_dev, port->num,
250 ret = __mlx4_ib_query_port(&device->ib_dev, port_num, &attr, 1);
613 int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
648 int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
839 kobject_get(dev->ib_dev.ports_parent->parent));
852 for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) {
869 kobject_put(dev->ib_dev.ports_parent->parent);
905 kobject_put(device->ib_dev.ports_parent->parent);
/drivers/infiniband/hw/mlx5/
cq.c
60 event.device = &dev->ib_dev;
main.c
829 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
838 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
850 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
859 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
868 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
876 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
885 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
893 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
963 ibev.device = &ibdev->ib_dev;
1002 err = mlx5_ib_query_device(&dev->ib_dev, dprop
[all...]
mlx5_ib.h
47 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
51 pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
55 pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
362 struct ib_device ib_dev; member in struct:mlx5_ib_dev
394 return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
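
mlx5 logs differently from ehca: its macros print the ib_device name and
the current pid via pr_debug/pr_err instead of routing through dev_printk.
A reconstruction of the debug variant from the fragment at 47 (the
continuation past the fragment is filled in from that era's source and
should be treated as illustrative):

    #include <linux/printk.h>
    #include <linux/sched.h>

    #define my_mlx5_dbg(dev, format, arg...)                             \
            pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name,   \
                     __func__, __LINE__, current->pid, ##arg)
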
mr.c
736 struct device *ddev = dev->ib_dev.dma_device;
/drivers/infiniband/hw/mthca/
mthca_av.c
281 ib_get_cached_gid(&dev->ib_dev,
mthca_catas.c
94 event.device = &dev->ib_dev;
mthca_cq.c
257 event.device = &dev->ib_dev;
mthca_dev.h
290 struct ib_device ib_dev; member in struct:mthca_dev
588 return container_of(ibdev, struct mthca_dev, ib_dev);
mthca_eq.c
253 record.device = &dev->ib_dev;
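
The mthca_catas.c, mthca_cq.c and mthca_eq.c hits are all the same
asynchronous-event pattern: point an ib_event at the embedded ib_dev and
hand it to the core for fan-out to registered handlers. A sketch; the
event type is chosen for illustration and my_dev matches the earlier
container_of sketch:

    #include <rdma/ib_verbs.h>

    struct my_dev {
        struct ib_device ib_dev;    /* embedded, as in struct mthca_dev */
    };

    static void my_report_fatal(struct my_dev *dev)
    {
        struct ib_event event;

        event.device           = &dev->ib_dev;  /* as in "event.device = &dev->ib_dev" */
        event.event            = IB_EVENT_DEVICE_FATAL;
        event.element.port_num = 0;
        ib_dispatch_event(&event);  /* notify ib_register_event_handler() users */
    }
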
