Searched refs:npages (Results 1 - 25 of 79) sorted by relevance


/drivers/gpu/drm/nouveau/core/subdev/fb/
ramgk20a.c 59 u32 npages, order; local
65 npages = size >> PAGE_SHIFT;
66 if (npages == 0)
67 npages = 1;
80 npages = max(align, npages);
86 mem->base.size = npages;
89 mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
97 mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
113 npages << PAGE_SHIF
[all...]
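
The gk20a excerpt shows the usual byte-size-to-page-count conversion: shift the size right by PAGE_SHIFT, clamp the result to at least one page, raise it to the alignment requirement, and only then size the dma_addr_t array and the coherent allocation. A minimal userspace sketch of just that arithmetic (PAGE_SHIFT, the helper name and the sample values are assumptions for illustration, not the driver's code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12u                 /* assumed 4 KiB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Hypothetical helper mirroring the pattern above: bytes -> page count,
 * clamped to at least one page and to the alignment (given in pages). */
static uint32_t size_to_npages(uint64_t size, uint32_t align_pages)
{
        uint32_t npages = (uint32_t)(size >> PAGE_SHIFT);

        if (npages == 0)
                npages = 1;            /* never ask for zero pages */
        if (npages < align_pages)
                npages = align_pages;  /* alignment requirement wins */
        return npages;
}

int main(void)
{
        printf("%u\n", size_to_npages(3 * PAGE_SIZE, 1)); /* 3 */
        printf("%u\n", size_to_npages(100, 1));           /* 1, clamped */
        printf("%u\n", size_to_npages(PAGE_SIZE, 8));     /* 8, from align */
        return 0;
}
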
/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c 55 s32 npages; member in struct:mlx5_pages_req
166 s32 *npages, int boot)
184 *npages = be32_to_cpu(out.num_pages);
275 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, argument
286 inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
294 for (i = 0; i < npages; i++) {
311 in->num_entries = cpu_to_be32(npages);
314 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
315 func_id, npages, err);
318 dev->priv.fw_pages += npages;
165 mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) argument
356 reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed) argument
425 mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages) argument
[all...]
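
give_pages() above sizes its firmware command as a fixed header plus npages page-address entries (inlen = sizeof(*in) + npages * sizeof(in->pas[0])) and records the count in num_entries. A standalone sketch of that header-plus-flexible-array sizing; the struct layout and field names here are illustrative stand-ins, not the real mlx5 command format:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Illustrative command: fixed header followed by npages 64-bit page
 * addresses as a C99 flexible array member. */
struct give_pages_cmd {
        uint32_t num_entries;
        uint32_t reserved;
        uint64_t pas[];                /* page addresses follow the header */
};

int main(void)
{
        int npages = 4;
        size_t inlen = sizeof(struct give_pages_cmd) + npages * sizeof(uint64_t);
        struct give_pages_cmd *in = calloc(1, inlen);

        if (!in)
                return 1;
        in->num_entries = (uint32_t)npages;   /* the driver stores this big-endian */
        for (int i = 0; i < npages; i++)
                in->pas[i] = 0x1000u * (uint64_t)(i + 1); /* fake page addresses */

        printf("command length = %zu bytes for %d pages\n", inlen, npages);
        free(in);
        return 0;
}
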
alloc.c 58 buf->npages = 1;
69 buf->npages *= 2;
76 buf->npages = buf->nbufs;
229 for (i = 0; i < buf->npages; i++) {
/drivers/gpu/drm/ttm/
ttm_page_alloc.c 70 * @npages: Number of pages in pool.
77 unsigned npages; member in struct:ttm_page_pool
276 static void ttm_pages_put(struct page *pages[], unsigned npages) argument
279 if (set_pages_array_wb(pages, npages))
280 pr_err("Failed to set %d pages to wb!\n", npages);
281 for (i = 0; i < npages; ++i)
288 pool->npages -= freed_pages;
426 count += _manager->pools[i].npages;
589 && count > pool->npages) {
607 pool->npages
672 ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) argument
720 ttm_get_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) argument
[all...]
/drivers/infiniband/hw/cxgb3/
iwch_mem.c 81 int npages)
87 if (npages > mhp->attr.pbl_size)
109 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) argument
112 npages << 3);
117 mhp->attr.pbl_size = npages;
128 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) argument
131 mhp->attr.pbl_addr + (offset << 3), npages);
138 int *npages,
176 *npages = 0;
178 *npages
78 iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift, int npages) argument
134 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) argument
[all...]
cxio_dbg.c 78 int size, npages; local
81 npages = (len + (1ULL << shift) - 1) >> shift;
82 size = npages * sizeof(u64);
93 __func__, m->addr, m->len, npages);
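
Both cxgb3 excerpts lean on the same round-up conversion: a byte length becomes a page count by adding (page size - 1) before the shift, and the page list then needs one 64-bit entry per page, i.e. npages << 3 bytes. A small self-contained check of that arithmetic (the 4 KiB shift and the sample length are assumed):

#include <stdio.h>
#include <stdint.h>

/* Round a byte length up to a count of (1 << shift)-byte pages. */
static uint64_t len_to_npages(uint64_t len, unsigned int shift)
{
        return (len + (1ULL << shift) - 1) >> shift;
}

int main(void)
{
        unsigned int shift = 12;       /* assumed 4 KiB pages */
        uint64_t npages = len_to_npages(10000, shift);

        /* One u64 page address per page: npages << 3 bytes of list space. */
        printf("npages = %llu, page list bytes = %llu\n",
               (unsigned long long)npages,
               (unsigned long long)(npages << 3)); /* 3 pages, 24 bytes */
        return 0;
}
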
iwch_provider.h 345 int npages);
346 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
348 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
353 int *npages,
/drivers/staging/lustre/lustre/ptlrpc/
sec_bulk.c 176 static void enc_pools_release_free_pages(long npages) argument
181 LASSERT(npages > 0);
182 LASSERT(npages <= page_pools.epp_free_pages);
188 page_pools.epp_free_pages -= npages;
189 page_pools.epp_total_pages -= npages;
199 while (npages--) {
278 int npages_to_npools(unsigned long npages) argument
280 return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
308 * merge @npools pointed by @pools which contains @npages new pages
314 static void enc_pools_insert(struct page ***pools, int npools, int npages) argument
395 enc_pools_add_pages(int npages) argument
[all...]
/drivers/infiniband/core/
umem.c 57 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
87 unsigned long npages; local
135 npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
139 locked = npages + current->mm->pinned_vm;
149 if (npages == 0) {
154 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
161 while (npages) {
163 min_t(unsigned long, npages,
170 umem->npages += ret;
172 npages
[all...]
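
The ib_umem path above turns a possibly unaligned user range into a page count by page-aligning size + offset, charges that count against the pinned-memory accounting, and then pins the pages in bounded chunks (the min_t() in the while loop). The kernel calls themselves (sg_alloc_table and the page-pinning call) are left out here; this sketch only mirrors the page-count and chunking arithmetic, with illustrative constants:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12u                                  /* assumed */
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1))

#define CHUNK 512u     /* illustrative per-call pin limit, like the min_t() bound */

int main(void)
{
        uint64_t addr   = 0x1234;                       /* user virtual address */
        uint64_t size   = 3 * PAGE_SIZE + 100;          /* requested bytes */
        uint64_t offset = addr & (PAGE_SIZE - 1);       /* start offset in page */

        /* Partially covered first and last pages count as whole pages. */
        uint64_t npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;  /* 4 here */
        printf("pinning %llu pages\n", (unsigned long long)npages);

        while (npages) {
                uint64_t n = npages < CHUNK ? npages : CHUNK;
                /* ...the real code pins n pages here... */
                printf("  chunk of %llu pages\n", (unsigned long long)n);
                npages -= n;
        }
        return 0;
}
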
/drivers/net/ethernet/mellanox/mlx4/
icm.c 59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
62 for (i = 0; i < chunk->npages; ++i)
71 for (i = 0; i < chunk->npages; ++i)
126 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, argument
152 while (npages > 0) {
167 chunk->npages = 0;
172 while (1 << cur_order > npages)
177 &chunk->mem[chunk->npages],
180 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
191 ++chunk->npages;
[all...]
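
mlx4_alloc_icm() above carves the requested page count into chunks of power-of-two blocks, lowering cur_order whenever 1 << cur_order would overshoot the pages still needed (and, in the driver, also when an allocation at the current order fails). A bookkeeping-only sketch of that descent, with made-up starting values and no real allocation:

#include <stdio.h>

int main(void)
{
        int npages = 37;       /* pages still to allocate (illustrative) */
        int cur_order = 5;     /* start with 2^5 = 32-page blocks (assumed) */

        while (npages > 0) {
                while ((1 << cur_order) > npages)
                        --cur_order;   /* never overshoot the remainder */

                printf("allocate a block of %d pages\n", 1 << cur_order);
                npages -= 1 << cur_order;   /* prints 32, 4, 1 for 37 pages */
        }
        return 0;
}
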
mr.c 203 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, argument
208 if (!npages) {
215 for (mtt->order = 0, i = 1; i < npages; i <<= 1)
426 u64 iova, u64 size, u32 access, int npages,
436 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
536 int npages, int page_shift, struct mlx4_mr *mr)
546 access, npages, page_shift, mr);
597 u64 iova, u64 size, int npages,
606 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
703 int start_index, int npages, u6
425 mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) argument
535 mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) argument
596 mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct mlx4_mpt_entry *mpt_entry) argument
702 mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
728 __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
756 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
980 mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova) argument
1007 mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey) argument
[all...]
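
mlx4_mtt_init() above sizes the MTT as the smallest power of two covering npages: the for loop is simply ceil(log2(npages)), with npages == 0 handled separately. A compact standalone check of that loop:

#include <stdio.h>

/* Smallest order such that (1 << order) >= npages, as in the loop above. */
static int npages_to_order(int npages)
{
        int order, i;

        for (order = 0, i = 1; i < npages; i <<= 1)
                ++order;
        return order;
}

int main(void)
{
        printf("%d %d %d %d\n",
               npages_to_order(1),     /* 0 */
               npages_to_order(2),     /* 1 */
               npages_to_order(5),     /* 3 */
               npages_to_order(64));   /* 6 */
        return 0;
}
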
icm.h 52 int npages; member in struct:mlx4_icm_chunk
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
/drivers/infiniband/hw/mthca/
mthca_allocator.c 199 int npages, shift; local
206 npages = 1;
220 npages *= 2;
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
227 for (i = 0; i < npages; ++i)
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list,
243 for (i = 0; i < npages; ++i)
246 for (i = 0; i < npages;
[all...]
mthca_memfree.c 69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
72 for (i = 0; i < chunk->npages; ++i)
81 for (i = 0; i < chunk->npages; ++i) {
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, argument
157 while (npages > 0) {
165 chunk->npages = 0;
170 while (1 << cur_order > npages)
175 &chunk->mem[chunk->npages],
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
182 ++chunk->npages;
526 int npages; local
[all...]
mthca_memfree.h 53 int npages; member in struct:mthca_icm_chunk
82 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
145 int npages; member in struct:mthca_db_table
/drivers/gpu/drm/exynos/
exynos_drm_gem.h 161 unsigned int npages,
167 unsigned int npages,
exynos_drm_gem.c 419 unsigned int npages,
429 for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
438 if (i != npages) {
447 npages, 1, 1, pages, NULL);
449 if (get_npages != npages) {
460 unsigned int npages,
466 for (i = 0; i < npages; i++) {
418 exynos_gem_get_pages_from_userptr(unsigned long start, unsigned int npages, struct page **pages, struct vm_area_struct *vma) argument
459 exynos_gem_put_pages_to_userptr(struct page **pages, unsigned int npages, struct vm_area_struct *vma) argument
/drivers/gpu/drm/nouveau/
nouveau_prime.c 34 int npages = nvbo->bo.num_pages; local
36 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
/drivers/gpu/drm/udl/
udl_gem.c 230 int npages; local
232 npages = size / PAGE_SIZE;
235 obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
240 obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
242 DRM_ERROR("obj pages is NULL %d\n", npages);
246 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
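
The udl import path above assumes a page-aligned buffer, so npages is a plain division, and the page-pointer array is sized with drm_malloc_ab(npages, sizeof(struct page *)). In plain userspace C the same shape looks like this, with calloc standing in for that helper (calloc also guards the count * size multiplication against overflow):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u        /* assumed */

int main(void)
{
        size_t size = 16 * PAGE_SIZE;          /* already page-aligned */
        size_t npages = size / PAGE_SIZE;

        /* Array of npages page pointers; void * stands in for struct page *. */
        void **pages = calloc(npages, sizeof(*pages));
        if (!pages) {
                fprintf(stderr, "obj pages is NULL %zu\n", npages);
                return 1;
        }

        printf("allocated an array for %zu page pointers\n", npages);
        free(pages);
        return 0;
}
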
/drivers/staging/lustre/lustre/libcfs/
module.c 90 kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages, argument
102 if (npages < 0)
105 if (npages == 0)
118 while (ldu->ldu_memhog_pages < npages &&
133 while (ldu->ldu_memhog_pages < npages &&
/drivers/infiniband/hw/usnic/
usnic_uiom.c 109 unsigned long npages; local
129 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
133 locked = npages + current->mm->locked_vm;
146 while (npages) {
148 min_t(unsigned long, npages,
155 npages -= ret;
220 int npages; local
226 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
228 vpn_last = vpn_start + npages - 1;
342 unsigned long npages; local
[all...]
/drivers/gpu/drm/radeon/
radeon_prime.c 35 int npages = bo->tbo.num_pages; local
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
/drivers/gpu/drm/msm/
msm_gem.c 37 int npages)
45 p = drm_malloc_ab(npages, sizeof(struct page *));
50 npages, 0, DRM_MM_SEARCH_DEFAULT);
57 for (i = 0; i < npages; i++) {
73 int npages = obj->size >> PAGE_SHIFT; local
78 p = get_pages_vram(obj, npages);
86 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
664 int ret, npages; local
680 npages = size / PAGE_SIZE;
684 msm_obj->pages = drm_malloc_ab(npages, sizeo
36 get_pages_vram(struct drm_gem_object *obj, int npages) argument
[all...]
/drivers/infiniband/hw/cxgb4/
mem.c 387 struct c4iw_mr *mhp, int shift, int npages)
392 if (npages > mhp->attr.pbl_size)
412 static int alloc_pbl(struct c4iw_mr *mhp, int npages) argument
415 npages << 3);
420 mhp->attr.pbl_size = npages;
427 u64 *total_size, int *npages,
464 *npages = 0;
466 *npages += (buffer_list[i].size +
469 if (!*npages)
472 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNE
386 reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift, int npages) argument
425 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) argument
504 int npages; local
565 int npages; local
[all...]
/drivers/infiniband/hw/mlx5/
mr.c 127 int npages = 1 << ent->order; local
150 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
659 int npages; local
662 npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
663 return (npages + 1) / 2;
732 u64 virt_addr, u64 len, int npages,
742 int size = sizeof(u64) * npages;
779 prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
817 int npages, int page_shift,
830 inlen = sizeof(*in) + sizeof(*in->pas) * ((npages
731 reg_umr(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, u64 len, int npages, int page_shift, int order, int access_flags) argument
815 reg_create(struct ib_pd *pd, u64 virt_addr, u64 length, struct ib_umem *umem, int npages, int page_shift, int access_flags) argument
879 int npages; local
976 int npages = mr->npages; local
[all...]
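
The mlx5 MR code above repeats the align-then-shift page count (ALIGN(len + offset, page_size) >> ilog2(page_size)) and then converts pages into 16-byte translation units: two 8-byte page entries per unit, hence (npages + 1) / 2 with odd counts rounded up. A self-contained rendering of that arithmetic (page size and sample lengths are assumed):

#include <stdio.h>
#include <stdint.h>

/* Round x up to a multiple of a; a must be a power of two. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t page_size = 4096;             /* assumed */
        unsigned int shift = 12;               /* ilog2(page_size) */
        uint64_t offset = 100, len = 5 * page_size;

        uint64_t npages = ALIGN_UP(len + offset, page_size) >> shift;

        /* Two 64-bit page entries per 16-byte unit; odd counts round up. */
        uint64_t oct_size = (npages + 1) / 2;

        printf("npages = %llu, 16-byte units = %llu\n",
               (unsigned long long)npages,
               (unsigned long long)oct_size);  /* 6 pages -> 3 units */
        return 0;
}
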

Completed in 504 milliseconds
