Searched refs: npages (Results 1 - 25 of 51), sorted by relevance


/drivers/staging/omapdrm/
omap_gem_helpers.c
40 int i, npages; local
46 npages = obj->size >> PAGE_SHIFT;
48 pages = drm_malloc_ab(npages, sizeof(struct page *));
54 for (i = 0; i < npages; i++) {
98 int i, npages; local
100 npages = obj->size >> PAGE_SHIFT;
102 for (i = 0; i < npages; i++) {
omap_dmm_tiler.h
82 uint32_t npages, uint32_t roll, bool wait);
omap_gem.c
234 int i, npages = obj->size >> PAGE_SHIFT; local
235 dma_addr_t *addrs = kmalloc(npages * sizeof(addrs), GFP_KERNEL);
236 for (i = 0; i < npages; i++) {
256 int i, npages = obj->size >> PAGE_SHIFT; local
257 for (i = 0; i < npages; i++) {
613 uint32_t npages = obj->size >> PAGE_SHIFT; local
616 if (roll > npages) {
631 ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
658 uint32_t npages = obj->size >> PAGE_SHIFT; local
684 ret = tiler_pin(block, pages, npages,
[all...]
omap_fbdev.c
85 int npages; local
88 npages = fbi->fix.line_length >> PAGE_SHIFT;
89 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
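A note on the omapdrm hits above: they all derive a page count from the GEM object's size and then walk a page-pointer array. A minimal sketch of that pattern, assuming a page-aligned object size (the helper name and the use of kcalloc rather than drm_malloc_ab are illustrative, not the driver's own):

#include <linux/mm.h>      /* PAGE_SHIFT, struct page */
#include <linux/slab.h>    /* kcalloc */

/* hypothetical helper: size must already be page aligned */
static struct page **alloc_page_array(size_t size)
{
	int i, npages = size >> PAGE_SHIFT;
	struct page **pages;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* the driver fills each slot from shmem; NULL here as a placeholder */
	for (i = 0; i < npages; i++)
		pages[i] = NULL;

	return pages;
}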
/drivers/gpu/drm/ttm/
ttm_page_alloc.c
70 * @npages: Number of pages in pool.
77 unsigned npages; member in struct:ttm_page_pool
276 static void ttm_pages_put(struct page *pages[], unsigned npages) argument
279 if (set_pages_array_wb(pages, npages))
280 pr_err("Failed to set %d pages to wb!\n", npages);
281 for (i = 0; i < npages; ++i)
288 pool->npages -= freed_pages;
386 total += _manager->pools[i].npages;
573 && count > pool->npages) {
591 pool->npages
656 ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) argument
704 ttm_get_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) argument
[all...]
ttm_page_alloc_dma.c
378 struct page *pages[], unsigned npages)
383 if (npages && !(pool->type & IS_CACHED) &&
384 set_pages_array_wb(pages, npages))
386 pool->dev_name, npages);
942 unsigned count = 0, i, npages = 0; local
966 npages = count;
968 npages = pool->npages_free - _manager->options.max_size;
971 if (npages < NUM_PAGES_TO_ALLOC)
972 npages = NUM_PAGES_TO_ALLOC;
997 if (npages)
377 ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, struct page *pages[], unsigned npages) argument
[all...]
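In both TTM pool allocators, npages is bookkeeping: how many pages the pool holds and how many a shrink pass should release. A rough restatement of the clamping seen around ttm_page_alloc_dma.c lines 966-972 (field and constant names are simplified stand-ins):

#include <linux/types.h>

#define NUM_PAGES_TO_ALLOC 64   /* illustrative batch size */

static unsigned pages_to_shrink(unsigned npages_free, unsigned max_size,
				bool shrink_all, unsigned count)
{
	unsigned npages;

	if (shrink_all)
		npages = count;                  /* caller-requested total */
	else
		npages = npages_free - max_size; /* only the excess; callers
						    reach this branch only when
						    npages_free > max_size */

	if (npages < NUM_PAGES_TO_ALLOC)         /* free in useful batches */
		npages = NUM_PAGES_TO_ALLOC;
	return npages;
}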
/drivers/infiniband/hw/cxgb3/
iwch_mem.c
81 int npages)
87 if (npages > mhp->attr.pbl_size)
109 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) argument
112 npages << 3);
117 mhp->attr.pbl_size = npages;
128 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) argument
131 mhp->attr.pbl_addr + (offset << 3), npages);
138 int *npages,
176 *npages = 0;
178 *npages
78 iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift, int npages) argument
134 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) argument
[all...]
cxio_dbg.c
78 int size, npages; local
81 npages = (len + (1ULL << shift) - 1) >> shift;
82 size = npages * sizeof(u64);
93 __func__, m->addr, m->len, npages);
iwch_provider.h
345 int npages);
346 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
348 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
353 int *npages,
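Two idioms recur in the cxgb3 hits: the page-buffer list (PBL) is sized as npages << 3 because each entry is an 8-byte __be64 DMA address, and byte lengths are rounded up to pages of 1 << shift. Both reduce to one-liners (hypothetical helper names):

#include <linux/types.h>

/* bytes needed for a PBL of npages 8-byte entries */
static inline size_t pbl_bytes(int npages)
{
	return (size_t)npages << 3;   /* npages * sizeof(__be64) */
}

/* pages of size (1 << shift) needed to cover len bytes, rounding up */
static inline int len_to_npages(u64 len, int shift)
{
	return (len + (1ULL << shift) - 1) >> shift;
}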
/drivers/gpu/drm/exynos/
exynos_drm_buf.c
38 unsigned int npages, page_size, i = 0; local
55 npages = buf->size >> SECTION_SHIFT;
58 npages = buf->size >> 16;
61 npages = buf->size >> PAGE_SHIFT;
71 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
87 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
97 while (i < npages) {
exynos_drm_gem.c
89 int i, npages; local
95 npages = obj->size >> PAGE_SHIFT;
97 pages = drm_malloc_ab(npages, sizeof(struct page *));
103 for (i = 0; i < npages; i++) {
124 int i, npages; local
126 npages = obj->size >> PAGE_SHIFT;
128 for (i = 0; i < npages; i++) {
168 unsigned int npages, i = 0; local
182 npages = obj->size >> PAGE_SHIFT;
191 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNE
[all...]
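exynos_drm_buf.c chooses the shift for npages from the mapping granularity: ARM sections, 64 KiB pages, or normal pages. A sketch of that selection, assuming the buffer size is already aligned to the chosen granularity (the enum and the SECTION_SHIFT fallback value are illustrative):

#include <linux/mm.h>

#ifndef SECTION_SHIFT
#define SECTION_SHIFT 20    /* 1 MiB ARM sections; illustrative fallback */
#endif

enum buf_granularity { BUF_SECTION, BUF_64K, BUF_NORMAL };

static unsigned int buf_npages(size_t size, enum buf_granularity g)
{
	switch (g) {
	case BUF_SECTION:
		return size >> SECTION_SHIFT;
	case BUF_64K:
		return size >> 16;          /* 64 KiB pages */
	default:
		return size >> PAGE_SHIFT;  /* normal pages */
	}
}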
/drivers/net/ethernet/mellanox/mlx4/
icm.c
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
62 for (i = 0; i < chunk->npages; ++i)
71 for (i = 0; i < chunk->npages; ++i)
122 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, argument
142 while (npages > 0) {
150 chunk->npages = 0;
155 while (1 << cur_order > npages)
160 &chunk->mem[chunk->npages],
163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
173 ++chunk->npages;
[all...]
mr.c
209 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, argument
214 if (!npages) {
221 for (mtt->order = 0, i = 1; i < npages; i <<= 1)
308 u64 iova, u64 size, u32 access, int npages,
318 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
418 int npages, int page_shift, struct mlx4_mr *mr)
428 access, npages, page_shift, mr);
530 int start_index, int npages, u64 *page_list)
544 npages * sizeof (u64), DMA_TO_DEVICE);
546 for (i = 0; i < npages;
307 mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) argument
417 mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) argument
529 mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
555 __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
583 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
713 mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova) argument
740 mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey) argument
[all...]
icm.h
52 int npages; member in struct:mlx4_icm_chunk
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
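The loop at mr.c line 221 computes the MTT order, i.e. the smallest order with (1 << order) >= npages, which is ceil(log2(npages)). Restated on its own:

/* smallest order such that (1 << order) >= npages; 0 for npages <= 1 */
static int mtt_order(int npages)
{
	int order = 0, i;

	for (i = 1; i < npages; i <<= 1)
		++order;
	return order;
}

mlx4_alloc_icm's while (npages > 0) loop is the companion consumer: it carves the request into chunks, clamping the chunk order (line 155) so 1 << cur_order never exceeds the pages still owed.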
/drivers/infiniband/hw/mthca/
mthca_allocator.c
199 int npages, shift; local
206 npages = 1;
220 npages *= 2;
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
227 for (i = 0; i < npages; ++i)
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list,
243 for (i = 0; i < npages; ++i)
246 for (i = 0; i < npages;
[all...]
mthca_memfree.c
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
72 for (i = 0; i < chunk->npages; ++i)
81 for (i = 0; i < chunk->npages; ++i) {
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, argument
157 while (npages > 0) {
165 chunk->npages = 0;
170 while (1 << cur_order > npages)
175 &chunk->mem[chunk->npages],
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
182 ++chunk->npages;
526 int npages; local
[all...]
mthca_memfree.h
53 int npages; member in struct:mthca_icm_chunk
82 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
145 int npages; member in struct:mthca_db_table
mthca_eq.c
470 int npages; local
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
487 for (i = 0; i < npages; ++i)
490 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
499 for (i = 0; i < npages; ++i) {
519 dma_list, PAGE_SHIFT, npages,
520 0, npages * PAGE_SIZE,
571 for (i = 0; i < npages; ++i)
593 int npages local
[all...]
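mthca_eq.c line 480 sizes the EQ page list by rounding the entry array up to whole pages. The same computation in isolation (the entry-size constant stands in for MTHCA_EQ_ENTRY_SIZE):

#include <linux/kernel.h>   /* ALIGN */
#include <linux/mm.h>       /* PAGE_SIZE */

#define EQ_ENTRY_SIZE 32    /* stand-in for MTHCA_EQ_ENTRY_SIZE */

static int eq_npages(int nent)
{
	return ALIGN(nent * EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
}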
/drivers/infiniband/core/
umem.c
88 unsigned long npages; local
136 npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
140 locked = npages + current->mm->pinned_vm;
151 while (npages) {
153 min_t(unsigned long, npages,
161 npages -= ret;
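In ib_umem_get, npages covers the whole byte range including the offset into the first page, and each get_user_pages batch decrements it until the range is pinned. The page-count arithmetic on its own (a sketch, not the function's full pinned_vm accounting):

#include <linux/mm.h>

static unsigned long umem_npages(unsigned long addr, size_t size)
{
	unsigned long offset = addr & ~PAGE_MASK;

	/* pages spanned by [addr, addr + size), counting partial ends */
	return PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
}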
/drivers/media/video/omap3isp/
ispqueue.c
72 if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
73 buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
156 unsigned int npages; local
161 npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
163 sglist = vmalloc(npages * sizeof(*sglist));
167 sg_init_table(sglist, npages);
169 for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
180 buf->sglen = npages;
197 sglist = vmalloc(buf->npages * sizeof(*sglist));
201 sg_init_table(sglist, buf->npages);
[all...]
ispqueue.h
72 * @npages: Number of pages (for userspace buffers)
95 unsigned int npages; member in struct:isp_video_buffer
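ispqueue.c builds one scatterlist entry per page of the buffer, walking the address a page at a time. A minimal sketch for a vmalloc'd, page-aligned kernel buffer (names are hypothetical; the driver also handles userspace buffers via buf->npages):

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

static struct scatterlist *build_sglist(void *buf, size_t len)
{
	unsigned int i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)buf;
	struct scatterlist *sglist;

	sglist = vmalloc(npages * sizeof(*sglist));
	if (!sglist)
		return NULL;

	sg_init_table(sglist, npages);
	for (i = 0; i < npages; ++i, addr += PAGE_SIZE)
		sg_set_page(&sglist[i], vmalloc_to_page((void *)addr),
			    PAGE_SIZE, 0);
	return sglist;
}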
/drivers/infiniband/hw/cxgb4/
mem.c
249 struct c4iw_mr *mhp, int shift, int npages)
254 if (npages > mhp->attr.pbl_size)
274 static int alloc_pbl(struct c4iw_mr *mhp, int npages) argument
277 npages << 3);
282 mhp->attr.pbl_size = npages;
289 u64 *total_size, int *npages,
326 *npages = 0;
328 *npages += (buffer_list[i].size +
331 if (!*npages)
334 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNE
248 reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift, int npages) argument
287 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) argument
366 int npages; local
427 int npages; local
[all...]
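cxgb4's build_phys_page_list (mirroring the cxgb3 version above) sums pages over every physical buffer before allocating the page list, and rejects an empty total. A sketch of that sizing step (the struct is a stand-in for ib_phys_buf):

#include <linux/slab.h>
#include <linux/types.h>

struct phys_buf { u64 addr; u64 size; };  /* stand-in for ib_phys_buf */

static u64 *alloc_phys_page_list(const struct phys_buf *bufs, int nbufs,
				 int shift, int *npages)
{
	int i;

	*npages = 0;
	for (i = 0; i < nbufs; ++i)
		*npages += (bufs[i].size + (1ULL << shift) - 1) >> shift;

	if (!*npages)
		return NULL;               /* nothing to map */
	return kmalloc(*npages * sizeof(u64), GFP_KERNEL);
}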
/drivers/xen/
privcmd.c
168 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
169 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
174 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
179 msg->mfn, msg->npages,
185 st->va += msg->npages << PAGE_SHIFT;
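privcmd.c validates msg->npages before shifting it into a byte length, so npages << PAGE_SHIFT can neither overflow a long nor run past the VMA. The checks distilled (simplified parameters):

#include <linux/kernel.h>   /* LONG_MAX */
#include <linux/mm.h>

static bool npages_range_ok(unsigned long va, unsigned long npages,
			    unsigned long vm_start, unsigned long vm_end)
{
	if (npages > (LONG_MAX >> PAGE_SHIFT))    /* shift would overflow */
		return false;
	return va >= vm_start &&
	       va + (npages << PAGE_SHIFT) <= vm_end;  /* stays in the VMA */
}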
/drivers/infiniband/hw/ipath/
ipath_user_sdma.c
277 unsigned long addr, int tlen, int npages)
284 npages, 0, 1, pages, NULL);
286 if (ret != npages) {
296 for (j = 0; j < npages; j++) {
333 const int npages = ipath_user_sdma_num_pages(iov + idx); local
338 npages);
357 unsigned long niov, int npages)
361 if (npages >= ARRAY_SIZE(pkt->addr))
418 int npages = 0; local
494 npages
275 ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, struct ipath_user_sdma_pkt *pkt, unsigned long addr, int tlen, int npages) argument
353 ipath_user_sdma_init_payload(const struct ipath_devdata *dd, struct ipath_user_sdma_queue *pq, struct ipath_user_sdma_pkt *pkt, const struct iovec *iov, unsigned long niov, int npages) argument
[all...]
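The ipath SDMA path pins npages of user memory with the eight-argument get_user_pages of that kernel generation and treats a short return as failure, releasing whatever was pinned. A sketch against that older API (caller holds mmap_sem; the error code is illustrative):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

static int pin_user_range(unsigned long addr, int npages, struct page **pages)
{
	int j, ret;

	/* write=0, force=1, matching the quoted call sites */
	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);
	if (ret != npages) {
		for (j = 0; j < ret; j++)   /* undo the partial pin */
			put_page(pages[j]);
		return -ENOMEM;
	}
	return 0;
}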
/drivers/infiniband/hw/qib/
qib_user_sdma.c
281 unsigned long addr, int tlen, int npages)
288 npages, 0, 1, pages, NULL);
290 if (ret != npages) {
300 for (j = 0; j < npages; j++) {
335 const int npages = qib_user_sdma_num_pages(iov + idx); local
339 iov[idx].iov_len, npages);
358 unsigned long niov, int npages)
362 if (npages >= ARRAY_SIZE(pkt->addr))
420 int npages = 0; local
495 npages
279 qib_user_sdma_pin_pages(const struct qib_devdata *dd, struct qib_user_sdma_pkt *pkt, unsigned long addr, int tlen, int npages) argument
354 qib_user_sdma_init_payload(const struct qib_devdata *dd, struct qib_user_sdma_queue *pq, struct qib_user_sdma_pkt *pkt, const struct iovec *iov, unsigned long niov, int npages) argument
[all...]
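qib's copy of the same SDMA logic additionally caps npages against the fixed addr[] array in the packet struct before building the payload (line 362 rejects npages >= ARRAY_SIZE(pkt->addr)). As a standalone guard (the struct and its length are stand-ins):

#include <linux/kernel.h>   /* ARRAY_SIZE */
#include <linux/types.h>

#define SDMA_MAX_ADDRS 16   /* stand-in for the real array length */

struct sdma_pkt { void *addr[SDMA_MAX_ADDRS]; };

static bool sdma_npages_fit(const struct sdma_pkt *pkt, int npages)
{
	/* mirrors: if (npages >= ARRAY_SIZE(pkt->addr)) fail */
	return npages < ARRAY_SIZE(pkt->addr);
}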

Completed in 697 milliseconds
