Searched refs:fmr (Results 1 - 22 of 22) sorted by relevance

/drivers/infiniband/core/
fmr_pool.c
120 struct ib_pool_fmr *fmr; local
127 hlist_for_each_entry(fmr, bucket, cache_node)
128 if (io_virtual_address == fmr->io_virtual_address &&
129 page_list_len == fmr->page_list_len &&
130 !memcmp(page_list, fmr->page_list,
132 return fmr;
140 struct ib_pool_fmr *fmr; local
146 list_for_each_entry(fmr, &pool->dirty_list, list) {
147 hlist_del_init(&fmr->cache_node);
148 fmr
300 struct ib_pool_fmr *fmr; local
360 struct ib_pool_fmr *fmr; local
399 struct ib_pool_fmr *fmr, *next; local
440 struct ib_pool_fmr *fmr; local
512 ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) argument
[all...]
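The matches above are the core FMR pool helper (cached lookup, dirty-list flushing, map/unmap). As a hedged sketch, this is roughly how a consumer drives that API; the pool sizing and access flags below are illustrative and not taken from any particular caller:

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

/* Sketch: create a cached FMR pool on a PD, map one page list, release it. */
static int demo_use_fmr_pool(struct ib_pd *pd, u64 *page_list, int npages, u64 iova)
{
        struct ib_fmr_pool_param params = {
                .max_pages_per_fmr = 64,                /* illustrative sizing */
                .page_shift        = PAGE_SHIFT,
                .access            = IB_ACCESS_LOCAL_WRITE |
                                     IB_ACCESS_REMOTE_WRITE |
                                     IB_ACCESS_REMOTE_READ,
                .pool_size         = 32,
                .dirty_watermark   = 8,
                .cache             = 1,                 /* enables the lookup at line 127 above */
        };
        struct ib_fmr_pool *pool;
        struct ib_pool_fmr *fmr;
        int ret;

        pool = ib_create_fmr_pool(pd, &params);
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        /* Reuses a cached mapping when page_list/iova match, else remaps an FMR. */
        fmr = ib_fmr_pool_map_phys(pool, page_list, npages, iova);
        if (IS_ERR(fmr)) {
                ret = PTR_ERR(fmr);
                goto out_destroy;
        }

        /* ... post work requests with fmr->fmr->lkey / fmr->fmr->rkey ... */

        ret = ib_fmr_pool_unmap(fmr);   /* returns it to the free or dirty list */
out_destroy:
        ib_destroy_fmr_pool(pool);
        return ret;
}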
verbs.c
1299 struct ib_fmr *fmr; local
1304 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1305 if (!IS_ERR(fmr)) {
1306 fmr->device = pd->device;
1307 fmr->pd = pd;
1311 return fmr;
1317 struct ib_fmr *fmr; local
1322 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1323 return fmr->device->unmap_fmr(fmr_list);
1327 int ib_dealloc_fmr(struct ib_fmr *fmr) argument
[all...]
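verbs.c only provides thin wrappers that dispatch to the driver's alloc_fmr/map_phys_fmr/unmap_fmr/dealloc_fmr hooks. A minimal sketch of the raw verb sequence, assuming a consumer that bypasses the pool; the limits below are illustrative:

#include <linux/list.h>
#include <rdma/ib_verbs.h>

static int demo_raw_fmr(struct ib_pd *pd, u64 *page_list, int npages, u64 iova)
{
        struct ib_fmr_attr attr = {
                .max_pages  = 64,               /* illustrative limits */
                .max_maps   = 32,
                .page_shift = PAGE_SHIFT,
        };
        struct ib_fmr *fmr;
        LIST_HEAD(fmr_list);
        int ret;

        fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
        if (ret)
                goto out;

        /* ib_unmap_fmr() is batched: every FMR on the list is invalidated at once
         * (cf. line 1322 above, which takes the device from the first list entry). */
        list_add_tail(&fmr->list, &fmr_list);
        ret = ib_unmap_fmr(&fmr_list);
out:
        ib_dealloc_fmr(fmr);
        return ret;
}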
/drivers/infiniband/hw/ipath/
ipath_mr.c
285 struct ipath_fmr *fmr; local
291 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
292 if (!fmr)
297 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
299 if (!fmr->mr.map[i])
302 fmr->mr.mapsz = m;
305 * ib_alloc_fmr() will initialize fmr
350 struct ipath_fmr *fmr = to_ifmr(ibfmr); local
393 struct ipath_fmr *fmr; local
416 struct ipath_fmr *fmr = to_ifmr(ibfmr); local
[all...]
/drivers/infiniband/hw/qib/
qib_mr.c
382 struct qib_fmr *fmr; local
389 fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
390 if (!fmr)
393 rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
398 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
401 rval = qib_alloc_lkey(&fmr->mr, 0);
404 fmr->ibfmr.rkey = fmr
439 struct qib_fmr *fmr = to_ifmr(ibfmr); local
485 struct qib_fmr *fmr; local
508 struct qib_fmr *fmr = to_ifmr(ibfmr); local
[all...]
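Both software drivers (ipath and its successor qib) size the FMR object with a trailing array of map-segment pointers, allocated in one go at line 291/389 above. A simplified, hypothetical reconstruction of that allocation pattern; the demo_* types are stand-ins, not the drivers' own:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_seg      { u64 vaddr; size_t length; };
struct demo_segarray { struct demo_seg seg[64]; };

struct demo_mr {
        u32 mapsz;                      /* number of segment arrays ("m" above) */
        struct demo_segarray *map[];    /* trailing pointer array, as in fmr->mr.map[] */
};

static struct demo_mr *demo_alloc_mr(int max_pages)
{
        int m = DIV_ROUND_UP(max_pages, 64);    /* one array per 64 segments */
        struct demo_mr *mr;
        int i;

        /* one allocation for the descriptor plus m segment-array pointers */
        mr = kzalloc(sizeof(*mr) + m * sizeof(mr->map[0]), GFP_KERNEL);
        if (!mr)
                return NULL;

        for (i = 0; i < m; i++) {
                mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
                if (!mr->map[i])
                        goto bail;
        }
        mr->mapsz = m;
        return mr;

bail:
        while (i--)
                kfree(mr->map[i]);
        kfree(mr);
        return NULL;
}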
/drivers/infiniband/hw/mthca/
mthca_mr.c
544 /* Free mr or fmr */
678 int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
680 if (fmr->maps)
683 mthca_free_region(dev, fmr->ibmr.lkey);
684 mthca_free_mtt(dev, fmr->mtt);
689 static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, argument
694 if (list_len > fmr->attr.max_pages)
697 page_mask = (1 << fmr->attr.page_shift) - 1;
710 if (fmr->maps >= fmr
720 struct mthca_fmr *fmr = to_mfmr(ibfmr); local
761 struct mthca_fmr *fmr = to_mfmr(ibfmr); local
807 mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
817 mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
[all...]
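mthca_check_fmr() (line 689) and its mlx4 counterpart further down both gate remapping on the same three conditions: list length, I/O virtual address alignment, and the remap budget. A hedged restatement of that check with the driver-specific types stripped out; the exact error codes are an assumption:

#include <linux/errno.h>
#include <linux/types.h>

static int demo_check_fmr(int list_len, int max_pages,
                          int maps, int max_maps,
                          u8 page_shift, u64 iova)
{
        u64 page_mask = (1ULL << page_shift) - 1;

        if (list_len > max_pages)       /* more pages than the FMR was sized for */
                return -EINVAL;

        if (iova & page_mask)           /* iova must be aligned to the FMR page size */
                return -EINVAL;

        if (maps >= max_maps)           /* remap budget spent: unmap/flush first */
                return -EINVAL;

        return 0;
}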
mthca_provider.c
1091 struct mthca_fmr *fmr; local
1094 fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
1095 if (!fmr)
1098 memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
1100 convert_access(mr_access_flags), fmr);
1103 kfree(fmr);
1107 return &fmr->ibmr;
1110 static int mthca_dealloc_fmr(struct ib_fmr *fmr) argument
1112 struct mthca_fmr *mfmr = to_mfmr(fmr);
1125 struct ib_fmr *fmr; local
[all...]
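mthca_provider.c (and mlx4/ib below at line 437) shows the provider side of ib_alloc_fmr(): allocate a driver wrapper around struct ib_fmr, program the hardware region, and return the embedded ib_fmr. A hedged sketch of that shape; demo_drv_fmr and demo_hw_fmr_setup() are stand-ins, not real driver symbols:

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct demo_drv_fmr {
        struct ib_fmr ibfmr;    /* handed back to the core; container_of() recovers us */
        /* ... hardware FMR state (MPT index, MTTs, ...) ... */
};

/* hypothetical stand-in for mthca_fmr_alloc() / mlx4_fmr_alloc()+enable */
static int demo_hw_fmr_setup(struct demo_drv_fmr *fmr, struct ib_pd *pd,
                             int access, struct ib_fmr_attr *attr);

static struct ib_fmr *demo_alloc_fmr(struct ib_pd *pd, int access,
                                     struct ib_fmr_attr *fmr_attr)
{
        struct demo_drv_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof(*fmr), GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = demo_hw_fmr_setup(fmr, pd, access, fmr_attr);
        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        /* the core fills ibfmr.device/ibfmr.pd (verbs.c lines 1306-1307);
         * the driver sets ibfmr.lkey/rkey from the hardware key */
        return &fmr->ibfmr;
}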
mthca_dev.h
482 u32 access, struct mthca_fmr *fmr);
485 void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
488 void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
489 int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr);
/drivers/net/ethernet/mellanox/mlx4/
mr.c
980 static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, argument
985 if (npages > fmr->max_pages)
988 page_mask = (1 << fmr->page_shift) - 1;
1001 if (fmr->maps >= fmr->max_maps)
1007 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, argument
1013 err = mlx4_check_fmr(fmr, page_list, npages, iova);
1017 ++fmr->maps;
1019 key = key_to_hw_index(fmr->mr.key);
1021 *lkey = *rkey = fmr
1054 mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr) argument
1097 mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) argument
1115 mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) argument
1145 mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) argument
[all...]
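The mlx4 core exports a complete FMR lifecycle (alloc, enable, map, unmap, free), which mlx4_ib wires into the verbs above. A minimal sketch of that sequence, assuming the signatures shown in the matches; sizes and access flags are illustrative:

#include <linux/mlx4/device.h>

static int demo_mlx4_fmr(struct mlx4_dev *dev, u32 pdn,
                         u64 *page_list, int npages, u64 iova)
{
        struct mlx4_fmr fmr;
        u32 lkey, rkey;
        int err;

        err = mlx4_fmr_alloc(dev, pdn,
                             MLX4_PERM_LOCAL_WRITE | MLX4_PERM_REMOTE_WRITE,
                             64 /* max_pages */, 32 /* max_maps */,
                             PAGE_SHIFT, &fmr);
        if (err)
                return err;

        err = mlx4_fmr_enable(dev, &fmr);       /* make the region usable by hardware */
        if (err)
                goto out_free;

        /* runs mlx4_check_fmr() (line 980) and then programs the page addresses */
        err = mlx4_map_phys_fmr(dev, &fmr, page_list, npages, iova, &lkey, &rkey);
        if (err)
                goto out_free;

        /* ... lkey/rkey are now valid for work requests ... */

        mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
out_free:
        mlx4_fmr_free(dev, &fmr);
        return err;
}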
/drivers/infiniband/ulp/iser/
iser_verbs.c
205 ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
208 if (!ib_conn->fmr.page_vec)
211 ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
227 ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
228 if (!IS_ERR(ib_conn->fmr.pool))
232 kfree(ib_conn->fmr.page_vec);
233 ib_conn->fmr.page_vec = NULL;
235 ret = PTR_ERR(ib_conn->fmr
[all...]
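Line 205 above shows a single-allocation idiom: the page_vec descriptor and its u64 page array come from one kmalloc, and the pages pointer is then aimed just past the struct (line 211). A hedged reconstruction of that idiom with a stand-in type; demo_page_vec is not iser's own struct:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_page_vec {
        u64 *pages;     /* points into the same allocation, right after the struct */
        int length;
        int data_size;
        int offset;
};

static struct demo_page_vec *demo_alloc_page_vec(int max_pages)
{
        struct demo_page_vec *pv;

        pv = kmalloc(sizeof(*pv) + max_pages * sizeof(u64), GFP_KERNEL);
        if (!pv)
                return NULL;

        pv->pages     = (u64 *)(pv + 1);        /* array begins immediately after the header */
        pv->length    = 0;
        pv->data_size = 0;
        pv->offset    = 0;
        return pv;
}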
iser_memory.c
421 iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
422 err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
430 ib_conn->fmr.page_vec->data_size,
431 ib_conn->fmr.page_vec->length,
432 ib_conn->fmr.page_vec->offset);
433 for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
435 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
iscsi_iser.h
441 * @lock: protects fmr/fastreg pool
442 * @union.fmr:
466 } fmr; member in union:ib_conn::__anon1675
/drivers/infiniband/hw/mlx4/
mr.c
437 struct mlx4_ib_fmr *fmr; local
440 fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
441 if (!fmr)
446 fmr_attr->page_shift, &fmr->mfmr);
450 err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
454 fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
456 return &fmr
[all...]
mlx4_ib.h
702 int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
/drivers/infiniband/hw/ehca/
ehca_mrmw.c
855 int ehca_map_phys_fmr(struct ib_fmr *fmr, argument
862 container_of(fmr->device, struct ehca_shca, ib_device);
863 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
864 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
869 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
879 ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
886 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
888 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
897 pginfo.u.fmr
981 ehca_dealloc_fmr(struct ib_fmr *fmr) argument
[all...]
ehca_iverbs.h
109 int ehca_map_phys_fmr(struct ib_fmr *fmr,
114 int ehca_dealloc_fmr(struct ib_fmr *fmr);
ehca_classes.h
332 } fmr; member in union:ehca_mr_pginfo::__anon1451
/drivers/mtd/nand/
fsl_elbc_nand.c
59 unsigned int fmr; /* FCM Flash Mode Register value */ member in struct:fsl_elbc_mtd
205 out_be32(&lbc->fmr, priv->fmr | 3);
210 "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
211 in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
635 priv->fmr |= al << FMR_AL_SHIFT;
683 chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
753 priv->fmr = 15 << FMR_CWTO_SHIFT;
755 priv->fmr |= FMR_ECCM;
784 chip->ecc.layout = (priv->fmr
[all...]
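Unlike every other hit in this search, "fmr" here has nothing to do with InfiniBand: it is the driver's cached value of the eLBC FCM Flash Mode Register. A hedged sketch of how that value is composed and programmed, following lines 753/755/635/205 above; the helper name is illustrative and the comment on the low OP bits is my reading of the register layout:

#include <linux/types.h>
#include <asm/fsl_lbc.h>

static void demo_elbc_program_fmr(struct fsl_lbc_regs __iomem *lbc,
                                  unsigned int *cached_fmr,
                                  bool hw_eccm, unsigned int addr_cycles)
{
        unsigned int fmr = 15 << FMR_CWTO_SHIFT;        /* maximum command wait timeout */

        if (hw_eccm)
                fmr |= FMR_ECCM;                        /* select the alternate ECC mode */
        fmr |= addr_cycles << FMR_AL_SHIFT;             /* extra address cycles for large-page NAND */

        *cached_fmr = fmr;
        /* the low OP bits put the FCM into execute-FIR mode (cf. line 205) */
        out_be32(&lbc->fmr, fmr | 3);
}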
/drivers/infiniband/hw/ocrdma/
ocrdma_hw.h
103 int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
/drivers/staging/lustre/lnet/klnds/o2iblnd/
o2iblnd.c
1522 kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) argument
1525 kib_fmr_pool_t *fpo = fmr->fmr_pool;
1531 rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
1539 fmr->fmr_pool = NULL;
1540 fmr->fmr_pfmr = NULL;
1563 __u64 iov, kib_fmr_t *fmr)
1581 fmr->fmr_pool = fpo;
1582 fmr->fmr_pfmr = pfmr;
2189 CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
1562 kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, __u64 iov, kib_fmr_t *fmr) argument
o2iblnd.h
322 int fpo_failed; /* fmr pool is failed */
327 struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
341 kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
553 kib_fmr_t fmr; /* FMR */ member in union:kib_tx::__anon6465
961 int npages, __u64 iov, kib_fmr_t *fmr);
962 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
o2iblnd_cb.c
578 rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);
586 rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
587 tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
638 if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) {
639 kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
640 tx->tx_u.fmr.fmr_pfmr = NULL;
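Putting the o2iblnd pieces together: the tx path maps its fragment pages through the FMR pool-set wrapper and then patches the RDMA descriptor key, choosing the rkey for a descriptor the peer will see and the lkey for the local one (lines 578-587 above). A condensed sketch of just that step, reusing the lnet types named above; the helper name and error handling are illustrative:

#include "o2iblnd.h"

static int demo_kiblnd_map_frags(kib_fmr_poolset_t *fps, kib_tx_t *tx,
                                 kib_rdma_desc_t *rd, __u64 *pages, int npages)
{
        int rc;

        rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);
        if (rc != 0)
                return rc;

        /* remote descriptors carry the rkey, the local one keeps the lkey */
        rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
                                         tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
        return 0;
}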
/drivers/infiniband/ulp/srp/
ib_srp.c
1170 struct ib_pool_fmr *fmr; local
1173 fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages,
1175 if (IS_ERR(fmr))
1176 return PTR_ERR(fmr);
1178 *state->next_fmr++ = fmr;
1181 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

Completed in 332 milliseconds