Searched defs:fmr (Results 1 - 12 of 12) sorted by relevance

/drivers/infiniband/core/
fmr_pool.c
120 struct ib_pool_fmr *fmr; local
128 hlist_for_each_entry(fmr, pos, bucket, cache_node)
129 if (io_virtual_address == fmr->io_virtual_address &&
130 page_list_len == fmr->page_list_len &&
131 !memcmp(page_list, fmr->page_list,
133 return fmr;
141 struct ib_pool_fmr *fmr; local
147 list_for_each_entry(fmr, &pool->dirty_list, list) {
148 hlist_del_init(&fmr->cache_node);
149 fmr
301 struct ib_pool_fmr *fmr; local
361 struct ib_pool_fmr *fmr; local
400 struct ib_pool_fmr *fmr, *next; local
441 struct ib_pool_fmr *fmr; local
513 ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) argument
[all...]
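The fmr_pool.c hits above are the shared FMR pool layer used by ULPs (cached lookup by page list, a dirty list that is flushed in batches, and ib_fmr_pool_unmap to return entries). A minimal consumer-side sketch of creating and destroying such a pool follows; the parameter values are assumptions for illustration, not copied from any in-tree user.

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

/* Illustrative pool setup; the sizes and watermark are assumptions. */
static struct ib_fmr_pool *example_create_pool(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,
		.page_shift        = PAGE_SHIFT,
		.access            = IB_ACCESS_LOCAL_WRITE |
				     IB_ACCESS_REMOTE_READ |
				     IB_ACCESS_REMOTE_WRITE,
		.pool_size         = 1024,
		.dirty_watermark   = 32,
		.cache             = 1,	/* enables the cache_node lookup seen above */
	};

	return ib_create_fmr_pool(pd, &params);	/* ERR_PTR on failure */
}

static void example_destroy_pool(struct ib_fmr_pool *pool)
{
	ib_destroy_fmr_pool(pool);	/* flushes and frees all pooled FMRs */
}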
verbs.c
1140 struct ib_fmr *fmr; local
1145 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1146 if (!IS_ERR(fmr)) {
1147 fmr->device = pd->device;
1148 fmr->pd = pd;
1152 return fmr;
1158 struct ib_fmr *fmr; local
1163 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1164 return fmr->device->unmap_fmr(fmr_list);
1168 int ib_dealloc_fmr(struct ib_fmr *fmr) argument
[all...]
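The verbs.c hits are the generic entry points (ib_alloc_fmr, ib_unmap_fmr, ib_dealloc_fmr) that the hardware drivers below plug into. A hedged sketch of the plain, unpooled FMR lifecycle built on those entry points follows; the attribute values and error handling are illustrative.

#include <linux/list.h>
#include <rdma/ib_verbs.h>

/* Unpooled FMR lifecycle: allocate, map, unmap, free. Values are
 * illustrative only. */
static int example_fmr_cycle(struct ib_pd *pd, u64 *page_list, int npages,
			     u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages  = npages,
		.max_maps   = 32,
		.page_shift = PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);	/* -> alloc_fmr above */
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
	if (ret)
		goto out_dealloc;

	list_add_tail(&fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);		/* -> unmap_fmr above */

out_dealloc:
	ib_dealloc_fmr(fmr);			/* -> dealloc_fmr */
	return ret;
}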
/drivers/infiniband/hw/ipath/
ipath_mr.c
290 struct ipath_fmr *fmr; local
296 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
297 if (!fmr)
302 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
304 if (!fmr->mr.map[i])
307 fmr->mr.mapsz = m;
310 * ib_alloc_fmr() will initialize fmr
355 struct ipath_fmr *fmr = to_ifmr(ibfmr); local
398 struct ipath_fmr *fmr; local
421 struct ipath_fmr *fmr = to_ifmr(ibfmr); local
[all...]
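The ipath allocation above (and the identical qib code further down) sizes the FMR with a trailing array of segment-map pointers in a single kmalloc. A stripped-down sketch of that allocation idiom, with made-up type and segment names, is:

#include <linux/types.h>
#include <linux/slab.h>

#define EX_SEGSZ 256				/* hypothetical entries per segment */

struct ex_seg {
	u64 addrs[EX_SEGSZ];			/* hypothetical per-segment payload */
};

struct ex_fmr {
	int mapsz;				/* segments actually allocated */
	struct ex_seg *map[];			/* trailing pointer array, as in ipath/qib */
};

static struct ex_fmr *ex_alloc_fmr(int max_pages)
{
	int m = (max_pages + EX_SEGSZ - 1) / EX_SEGSZ;
	struct ex_fmr *fmr;
	int i;

	/* One kmalloc for the header plus m pointers, mirroring the
	 * "sizeof *fmr + m * sizeof fmr->mr.map[0]" call above. */
	fmr = kmalloc(sizeof(*fmr) + m * sizeof(fmr->map[0]), GFP_KERNEL);
	if (!fmr)
		return NULL;

	for (i = 0; i < m; i++) {
		fmr->map[i] = kmalloc(sizeof(*fmr->map[i]), GFP_KERNEL);
		if (!fmr->map[i])
			goto bail;
	}
	fmr->mapsz = m;
	return fmr;

bail:
	while (i)
		kfree(fmr->map[--i]);
	kfree(fmr);
	return NULL;
}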
/drivers/infiniband/hw/mlx4/
mr.c
272 struct mlx4_ib_fmr *fmr; local
275 fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
276 if (!fmr)
281 fmr_attr->page_shift, &fmr->mfmr);
285 err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
289 fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
291 return &fmr
[all...]
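The mlx4 IB driver's alloc path above is a thin wrapper around the core mlx4_fmr_* API (see the mlx4/mr.c hits further down): allocate, enable, then publish mr.key as both lkey and rkey. A hedged sketch of that shape against the public mlx4 API; the page and map limits are assumptions.

#include <linux/mlx4/device.h>

/* Bring up a core mlx4 FMR the way the IB wrapper above does. */
static int ex_mlx4_fmr_setup(struct mlx4_dev *dev, u32 pdn, u32 access,
			     struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey)
{
	int err;

	err = mlx4_fmr_alloc(dev, pdn, access, 64 /* max_pages */,
			     32 /* max_maps */, PAGE_SHIFT, fmr);
	if (err)
		return err;

	err = mlx4_fmr_enable(dev, fmr);
	if (err) {
		mlx4_fmr_free(dev, fmr);
		return err;
	}

	*lkey = *rkey = fmr->mr.key;	/* as in fmr->ibfmr.rkey = ... above */
	return 0;
}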
/drivers/infiniband/hw/qib/
qib_mr.c
357 struct qib_fmr *fmr; local
363 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
364 if (!fmr)
369 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
371 if (!fmr->mr.map[i])
374 fmr->mr.mapsz = m;
377 * ib_alloc_fmr() will initialize fmr
424 struct qib_fmr *fmr = to_ifmr(ibfmr); local
469 struct qib_fmr *fmr; local
492 struct qib_fmr *fmr = to_ifmr(ibfmr); local
[all...]
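qib_mr.c is a near-verbatim copy of the ipath allocation shown earlier. The matching teardown (not visible in the snippets) walks mapsz and frees each segment before the containing structure; a sketch continuing the hypothetical names from the allocation example above:

/* Teardown mirror of ex_alloc_fmr(): free each map segment, then the
 * header. Hypothetical names, continuing the earlier sketch. */
static void ex_dealloc_fmr(struct ex_fmr *fmr)
{
	int i;

	for (i = 0; i < fmr->mapsz; i++)
		kfree(fmr->map[i]);
	kfree(fmr);
}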
/drivers/mtd/nand/
fsl_elbc_nand.c
59 unsigned int fmr; /* FCM Flash Mode Register value */ member in struct:fsl_elbc_mtd
218 out_be32(&lbc->fmr, priv->fmr | 3);
223 "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
224 in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
676 priv->fmr |= al << FMR_AL_SHIFT;
726 chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
780 priv->fmr = 15 << FMR_CWTO_SHIFT;
782 priv->fmr |= FMR_ECCM;
812 chip->ecc.layout = (priv->fmr
[all...]
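Unlike the InfiniBand hits, the eLBC NAND driver's fmr is a cached value for the FCM Flash Mode Register. A hedged sketch of how the driver composes and programs it, following the fragments above; the FMR_* macros and the lbc->fmr write are from the listing, while the register struct name, header locations, and the helper shape are assumptions.

#include <asm/io.h>
#include <asm/fsl_lbc.h>	/* assumed header for FMR_* and the regs struct */

/* Compose the cached FMR value; the timeout, ECC and address-length
 * choices are illustrative. */
static u32 ex_build_fmr(bool hw_ecc, unsigned int al)
{
	u32 fmr;

	fmr = 15 << FMR_CWTO_SHIFT;	/* command wait timeout, as above */
	if (hw_ecc)
		fmr |= FMR_ECCM;	/* ECC mode; also selects chip->ecc.layout */
	fmr |= al << FMR_AL_SHIFT;	/* extra address length, as above */
	return fmr;
}

/* When a command runs, the value is written to the controller, ORed with
 * 3 in the low bits, exactly as fsl_elbc_run_command does above. */
static void ex_program_fmr(struct fsl_lbc_regs __iomem *lbc, u32 fmr)
{
	out_be32(&lbc->fmr, fmr | 3);
}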
/drivers/infiniband/hw/mthca/
mthca_mr.c
544 /* Free mr or fmr */
678 int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
680 if (fmr->maps)
683 mthca_free_region(dev, fmr->ibmr.lkey);
684 mthca_free_mtt(dev, fmr->mtt);
689 static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, argument
694 if (list_len > fmr->attr.max_pages)
697 page_mask = (1 << fmr->attr.page_shift) - 1;
710 if (fmr->maps >= fmr
720 struct mthca_fmr *fmr = to_mfmr(ibfmr); local
761 struct mthca_fmr *fmr = to_mfmr(ibfmr); local
807 mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
817 mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) argument
[all...]
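mthca_check_fmr above (like mlx4_check_fmr further down) gates every map with the same sanity checks: list length against max_pages, page alignment via a mask derived from page_shift, and a cap on outstanding maps. A generic reconstruction of that check with hypothetical struct and field names:

#include <linux/types.h>
#include <linux/errno.h>

/* Placeholder structure; not the mthca definition. */
struct ex_hw_fmr {
	int max_pages;
	int max_maps;
	u8  page_shift;
	int maps;		/* maps outstanding since the last unmap */
};

static inline int ex_check_fmr(struct ex_hw_fmr *fmr, u64 *page_list,
			       int list_len, u64 iova)
{
	u64 page_mask;
	int i;

	if (list_len > fmr->max_pages)
		return -EINVAL;

	page_mask = (1ULL << fmr->page_shift) - 1;

	/* The target IOVA and every page address must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;
	for (i = 0; i < list_len; ++i)
		if (page_list[i] & page_mask)
			return -EINVAL;

	/* Remaps are limited; the consumer must unmap before reusing. */
	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}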
mthca_provider.c
1094 struct mthca_fmr *fmr; local
1097 fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
1098 if (!fmr)
1101 memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
1103 convert_access(mr_access_flags), fmr);
1106 kfree(fmr);
1110 return &fmr->ibmr;
1113 static int mthca_dealloc_fmr(struct ib_fmr *fmr) argument
1115 struct mthca_fmr *mfmr = to_mfmr(fmr);
1128 struct ib_fmr *fmr; local
[all...]
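The provider-level dealloc hit above pairs with mthca_free_fmr from mthca_mr.c: convert the generic ib_fmr back to the driver structure, release the hardware resources, then free the memory. A plausible sketch reconstructed from the fragments visible here, hedged rather than quoted:

/* Sketch of mthca's dealloc path; treat as illustrative, not the exact
 * in-tree body. */
static int ex_mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);	/* fails if still mapped */
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}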
/drivers/net/ethernet/mellanox/mlx4/
mr.c
713 static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, argument
718 if (npages > fmr->max_pages)
721 page_mask = (1 << fmr->page_shift) - 1;
734 if (fmr->maps >= fmr->max_maps)
740 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, argument
746 err = mlx4_check_fmr(fmr, page_list, npages, iova);
750 ++fmr->maps;
752 key = key_to_hw_index(fmr->mr.key);
754 *lkey = *rkey = fmr
787 mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, int max_maps, u8 page_shift, struct mlx4_fmr *fmr) argument
833 mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) argument
851 mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) argument
883 mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) argument
[all...]
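These mlx4 core hits expose the full FMR data path to the IB driver: mlx4_map_phys_fmr returns fresh lkey/rkey values per map, mlx4_fmr_unmap retires them, and mlx4_fmr_free releases the region. A hedged usage sketch using the signatures visible above; error handling after the map is elided for brevity.

#include <linux/mlx4/device.h>

/* Illustrative map/unmap cycle against an already enabled mlx4 FMR
 * (see the alloc/enable sketch earlier). */
static int ex_mlx4_fmr_map_cycle(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
				 u64 *page_list, int npages, u64 iova)
{
	u32 lkey, rkey;
	int err;

	err = mlx4_map_phys_fmr(dev, fmr, page_list, npages, iova,
				&lkey, &rkey);
	if (err)
		return err;

	/* ... post work requests referencing lkey/rkey here ... */

	mlx4_fmr_unmap(dev, fmr, &lkey, &rkey);	/* invalidates the mapping */
	mlx4_fmr_free(dev, fmr);		/* releases the region */
	return 0;
}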
/drivers/infiniband/hw/ehca/
ehca_classes.h
332 } fmr; member in union:ehca_mr_pginfo::__anon941
ehca_mrmw.c
855 int ehca_map_phys_fmr(struct ib_fmr *fmr, argument
862 container_of(fmr->device, struct ehca_shca, ib_device);
863 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
864 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
869 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
879 ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
886 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
888 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
897 pginfo.u.fmr
981 ehca_dealloc_fmr(struct ib_fmr *fmr) argument
[all...]
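ehca's map entry point above recovers its private state with container_of and validates the request before building page info: the iova must be aligned to the FMR page size (in bytes, checked with a modulo rather than the mask used by mthca/mlx4) and the per-FMR map count is bounded. A compact sketch of that pattern with hypothetical wrapper and field names:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical wrapper illustrating the pattern in ehca_map_phys_fmr. */
struct ex_drv_fmr {
	struct ib_fmr ib_fmr;		/* embedded generic object */
	u32 fmr_page_size;		/* bytes */
	int fmr_map_cnt;
	int fmr_max_maps;
};

static int ex_drv_map_phys_fmr(struct ib_fmr *fmr, u64 iova)
{
	struct ex_drv_fmr *e_fmr = container_of(fmr, struct ex_drv_fmr, ib_fmr);

	if (iova % e_fmr->fmr_page_size)
		return -EINVAL;		/* the "bad iova" case above */
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps)
		return -EINVAL;		/* the "map limit exceeded" case above */

	e_fmr->fmr_map_cnt++;
	/* ... build page info and reregister with the hypervisor ... */
	return 0;
}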
/drivers/infiniband/ulp/srp/
ib_srp.c
703 struct ib_pool_fmr *fmr; local
716 fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
718 if (IS_ERR(fmr))
719 return PTR_ERR(fmr);
721 *state->next_fmr++ = fmr;
724 srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
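Finally, the SRP initiator shows the pool API in its data path: pages collected for a request are handed to ib_fmr_pool_map_phys and the returned entry's rkey goes into the wire descriptor. A simplified, hedged sketch of that step and its later release; the descriptor type, fields, and the choice of io_virtual_address are placeholders.

#include <linux/types.h>
#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

struct ex_desc {
	u32 rkey;
	u64 va;
	u32 len;
};

/* Map a collected page list through the pool, modeled on the srp call
 * above; remember the pool FMR so it can be returned later. */
static int ex_map_through_pool(struct ib_fmr_pool *pool, u64 *pages,
			       int npages, u32 len, struct ex_desc *desc,
			       struct ib_pool_fmr **pfmr)
{
	struct ib_pool_fmr *fmr;

	fmr = ib_fmr_pool_map_phys(pool, pages, npages, pages[0]);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	desc->rkey = fmr->fmr->rkey;	/* remote key for the descriptor */
	desc->va   = 0;
	desc->len  = len;
	*pfmr = fmr;
	return 0;
}

static void ex_release(struct ib_pool_fmr *fmr)
{
	ib_fmr_pool_unmap(fmr);		/* returns the FMR to the pool */
}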

Completed in 188 milliseconds