Searched defs:npages (Results 1 - 25 of 41) sorted by relevance


/drivers/gpu/drm/exynos/

exynos_drm_buf.c
38 unsigned int npages, page_size, i = 0; [local]
55 npages = buf->size >> SECTION_SHIFT;
58 npages = buf->size >> 16;
61 npages = buf->size >> PAGE_SHIFT;
71 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
87 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
97 while (i < npages) {

exynos_drm_gem.c
89 int i, npages; [local]
95 npages = obj->size >> PAGE_SHIFT;
97 pages = drm_malloc_ab(npages, sizeof(struct page *));
103 for (i = 0; i < npages; i++) {
124 int i, npages; [local]
126 npages = obj->size >> PAGE_SHIFT;
128 for (i = 0; i < npages; i++) {
168 unsigned int npages, i = 0; [local]
182 npages = obj->size >> PAGE_SHIFT;
191 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
[all...]
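
The exynos matches show the recurring conversion from a byte size to a page count, with the shift picked per mapping granularity. A minimal user-space sketch of that arithmetic; the shift values (SECTION_SHIFT = 20 for 1 MiB ARM sections, 16 for 64 KiB, PAGE_SHIFT = 12 for 4 KiB) are typical assumptions, not read from the driver headers.

#include <stdio.h>

/* Assumed granularity shifts; the kernel defines these elsewhere. */
#define SECTION_SHIFT 20   /* 1 MiB ARM section */
#define SHIFT_64K     16   /* 64 KiB page */
#define PAGE_SHIFT    12   /* 4 KiB page */

/* Mirrors the exynos_drm_buf.c lines above: npages = size >> shift. */
static unsigned int size_to_npages(unsigned long size, unsigned int shift)
{
        return size >> shift;
}

int main(void)
{
        unsigned long size = 4UL << 20;   /* 4 MiB buffer */

        printf("%u sections, %u 64K pages, %u 4K pages\n",
               size_to_npages(size, SECTION_SHIFT),
               size_to_npages(size, SHIFT_64K),
               size_to_npages(size, PAGE_SHIFT));
        return 0;
}
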
/drivers/infiniband/hw/cxgb3/

cxio_dbg.c
78 int size, npages; [local]
81 npages = (len + (1ULL << shift) - 1) >> shift;
82 size = npages * sizeof(u64);
93 __func__, m->addr, m->len, npages);

iwch_mem.c
81 int npages)
87 if (npages > mhp->attr.pbl_size)
109 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) [argument]
112 npages << 3);
117 mhp->attr.pbl_size = npages;
128 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) [argument]
131 mhp->attr.pbl_addr + (offset << 3), npages);
138 int *npages,
176 *npages = 0;
178 *npages
78 iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift, int npages) [argument]
134 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) [argument]
[all...]
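
build_phys_page_list() is matched here and again in cxgb4 below; its counting step sums a round-up division per buffer, as in cxio_dbg.c line 81. Restated as a standalone program, with a hypothetical phys_buf standing in for struct ib_phys_buf:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct ib_phys_buf: only the fields the
 * page-count arithmetic needs. */
struct phys_buf {
        uint64_t addr;
        uint64_t size;
};

/* Mirrors the counting step above: each buffer contributes
 * ceil(size / 2^shift) pages of the chosen page size. */
static int count_pages(const struct phys_buf *bufs, int n, int shift)
{
        int i, npages = 0;

        for (i = 0; i < n; i++)
                npages += (bufs[i].size + (1ULL << shift) - 1) >> shift;
        return npages;
}

int main(void)
{
        struct phys_buf bufs[] = { { 0, 5000 }, { 8192, 4096 } };

        /* With 4 KiB pages (shift 12): 5000 bytes -> 2 pages, 4096 -> 1. */
        printf("npages = %d\n", count_pages(bufs, 2, 12));
        return 0;
}
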
/drivers/staging/omapdrm/

omap_gem_helpers.c
40 int i, npages; [local]
46 npages = obj->size >> PAGE_SHIFT;
48 pages = drm_malloc_ab(npages, sizeof(struct page *));
54 for (i = 0; i < npages; i++) {
98 int i, npages; [local]
100 npages = obj->size >> PAGE_SHIFT;
102 for (i = 0; i < npages; i++) {

omap_fbdev.c
85 int npages; [local]
88 npages = fbi->fix.line_length >> PAGE_SHIFT;
89 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
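
The omap_fbdev.c lines turn a page-aligned scanline length into pages-per-line, so panning down by yoffset lines becomes a roll of yoffset * npages pages. A sketch of just that arithmetic, assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

int main(void)
{
        unsigned int line_length = 8192;   /* bytes per scanline, page aligned */
        unsigned int yoffset = 3;          /* lines to pan down */
        unsigned int npages = line_length >> PAGE_SHIFT; /* pages per line */

        /* omap_gem_roll() receives a page offset, not a byte offset. */
        printf("roll by %u pages\n", yoffset * npages);   /* 6 */
        return 0;
}
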
/drivers/net/ethernet/mellanox/mlx4/

icm.h
52 int npages; [member of struct mlx4_icm_chunk]
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,

icm.c
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
62 for (i = 0; i < chunk->npages; ++i)
71 for (i = 0; i < chunk->npages; ++i)
122 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, [argument]
142 while (npages > 0) {
150 chunk->npages = 0;
155 while (1 << cur_order > npages)
160 &chunk->mem[chunk->npages],
163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
173 ++chunk->npages;
[all...]
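
The mlx4 ICM allocator (mirrored by mthca further down) satisfies an npages request with high-order blocks, lowering cur_order whenever the remainder no longer fills one; in the kernel it also drops the order when an allocation fails. The loop's shape, runnable in user space:

#include <stdio.h>

/* Sketch of the mlx4/mthca ICM chunking loop above; allocation and
 * error handling are omitted, only the order arithmetic is shown. */
int main(void)
{
        int npages = 21;     /* pages still to allocate */
        int cur_order = 3;   /* start with 8-page blocks */

        while (npages > 0) {
                while (1 << cur_order > npages)
                        --cur_order;   /* shrink block to fit remainder */
                printf("allocate block of %d pages\n", 1 << cur_order);
                npages -= 1 << cur_order;   /* prints 8, 8, 4, 1 */
        }
        return 0;
}
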
eq.c
628 int npages; [local]
637 npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
639 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
644 for (i = 0; i < npages; ++i)
647 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
656 for (i = 0; i < npages; ++i) {
678 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
682 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
717 for (i = 0; i < npages; ++i)
739 int npages [local]
[all...]
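
eq.c derives npages by rounding the event queue's byte size up to whole pages. Restated with illustrative values; the 32-byte entry size and 4 KiB page are assumptions, not taken from the driver headers:

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumed */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long nent = 200, entry_size = 32;
        unsigned long npages = PAGE_ALIGN(nent * entry_size) / PAGE_SIZE;

        /* 6400 bytes round up to 8192, i.e. 2 pages. */
        printf("%lu entries of %lu bytes -> %lu page(s)\n",
               nent, entry_size, npages);
        return 0;
}
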
mr.c
209 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, [argument]
214 if (!npages) {
221 for (mtt->order = 0, i = 1; i < npages; i <<= 1)
308 u64 iova, u64 size, u32 access, int npages,
318 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
418 int npages, int page_shift, struct mlx4_mr *mr)
428 access, npages, page_shift, mr);
530 int start_index, int npages, u64 *page_list)
544 npages * sizeof (u64), DMA_TO_DEVICE);
546 for (i = 0; i < npages;
307 mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) [argument]
417 mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) [argument]
529 mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) [argument]
555 __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) [argument]
583 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) [argument]
713 mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova) [argument]
740 mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey) [argument]
[all...]
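
The mlx4_mtt_init() loop at line 221 rounds npages up to a power of two and keeps only the exponent, since MTT segments come from a buddy allocator that works in power-of-two sizes. The same loop as a standalone function:

#include <stdio.h>

/* Re-statement of the line-221 loop: order = ceil(log2(npages)). */
static int mtt_order(int npages)
{
        int order, i;

        for (order = 0, i = 1; i < npages; i <<= 1)
                ++order;
        return order;
}

int main(void)
{
        printf("npages=5 -> order %d (8 pages reserved)\n", mtt_order(5));
        printf("npages=8 -> order %d\n", mtt_order(8));
        return 0;
}
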
/drivers/infiniband/core/

umem.c
88 unsigned long npages; [local]
136 npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
140 locked = npages + current->mm->pinned_vm;
151 while (npages) {
153 min_t(unsigned long, npages,
161 npages -= ret;
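
ib_umem_get() pins the user range in bounded batches: each pass pins at most a fixed count, and npages counts down by what was actually pinned. A user-space sketch of the loop's shape; pin_batch() is a hypothetical stand-in for get_user_pages(), and 512 stands in for the kernel's PAGE_SIZE / sizeof (struct page *) cap:

#include <stdio.h>

#define BATCH 512UL   /* assumed batch limit */

static unsigned long pin_batch(unsigned long want)
{
        return want;   /* pretend every requested page was pinned */
}

int main(void)
{
        unsigned long npages = 1300;

        while (npages) {
                /* min_t(unsigned long, npages, BATCH) in the kernel */
                unsigned long chunk = npages < BATCH ? npages : BATCH;
                unsigned long ret = pin_batch(chunk);

                if (ret == 0)
                        break;   /* the kernel unwinds and fails here */
                npages -= ret;
                printf("pinned %lu, %lu remaining\n", ret, npages);
        }
        return 0;
}
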
/drivers/infiniband/hw/mthca/

mthca_allocator.c
199 int npages, shift; [local]
206 npages = 1;
220 npages *= 2;
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
227 for (i = 0; i < npages; ++i)
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list,
243 for (i = 0; i < npages; ++i)
246 for (i = 0; i < npages;
[all...]
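
The function matched above sizes its DMA list two ways: the direct-buffer path doubles npages until the buffer fits, yielding a power of two, while the indirect path divides rounding up. A sketch of the two counting styles, assuming 4 KiB pages; with a 5-page buffer they differ:

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumed */

int main(void)
{
        unsigned long size = 5 * PAGE_SIZE;
        unsigned long npages;

        /* Direct path: power-of-two page count (lines 206 and 220). */
        npages = 1;
        while (npages * PAGE_SIZE < size)
                npages *= 2;
        printf("direct:   %lu pages\n", npages);   /* 8 */

        /* Indirect path: plain round-up division (line 231). */
        npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        printf("indirect: %lu pages\n", npages);   /* 5 */
        return 0;
}
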
mthca_memfree.h
53 int npages; [member of struct mthca_icm_chunk]
82 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
145 int npages; [member of struct mthca_db_table]

mthca_memfree.c
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
72 for (i = 0; i < chunk->npages; ++i)
81 for (i = 0; i < chunk->npages; ++i) {
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, [argument]
157 while (npages > 0) {
165 chunk->npages = 0;
170 while (1 << cur_order > npages)
175 &chunk->mem[chunk->npages],
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
182 ++chunk->npages;
526 int npages; [local]
[all...]
mthca_provider.c
903 int npages; [local]
930 npages = 0;
932 npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
934 if (!npages)
937 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
951 "in PD %x; shift %d, npages %d.\n",
955 shift, npages);
959 page_list, shift, npages,
/drivers/infiniband/hw/mlx4/

mr.c
303 int npages, u64 iova)
308 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
302 mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages, u64 iova) [argument]
/drivers/media/video/omap3isp/

ispqueue.h
72 * @npages: Number of pages (for userspace buffers)
95 unsigned int npages; [member of struct isp_video_buffer]

ispqueue.c
72 if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
73 buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
156 unsigned int npages; [local]
161 npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
163 sglist = vmalloc(npages * sizeof(*sglist));
167 sg_init_table(sglist, npages);
169 for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
180 buf->sglen = npages;
197 sglist = vmalloc(buf->npages * sizeof(*sglist));
201 sg_init_table(sglist, buf->npages);
[all...]
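
ispqueue.c builds one scatterlist entry per page of a contiguous buffer, stepping the address by PAGE_SIZE each iteration. A user-space sketch of that shape, with a hypothetical sg_entry standing in for struct scatterlist:

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumed */

/* Hypothetical stand-in for struct scatterlist. */
struct sg_entry {
        unsigned long addr;
        unsigned long len;
};

int main(void)
{
        unsigned long base = 0x10000, length = 3 * PAGE_SIZE;
        /* PAGE_ALIGN(length) >> PAGE_SHIFT, written as a division. */
        unsigned int npages = (length + PAGE_SIZE - 1) / PAGE_SIZE;
        struct sg_entry sglist[16];
        unsigned long addr = base;
        unsigned int i;

        for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
                sglist[i].addr = addr;     /* one entry per page */
                sglist[i].len = PAGE_SIZE;
        }
        printf("built %u sg entries\n", npages);
        return 0;
}
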
/drivers/net/wireless/iwmc3200wifi/

tx.c
103 u32 npages = BYTES_TO_PAGES(nb); [local]
105 if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
194 u32 npages = BYTES_TO_PAGES(nb); [local]
205 iwm_tx_credit_dec(iwm, id, npages);
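
The tx.c check gates transmission on page-sized credits: a frame of nb bytes costs BYTES_TO_PAGES(nb) credits and may only be sent if that many are available. A sketch of the accounting; the 4096-byte page and the round-up macro are assumptions here, the driver defines BYTES_TO_PAGES in its own headers:

#include <stdio.h>

#define TX_PAGE_SIZE 4096U   /* assumed credit unit */
#define BYTES_TO_PAGES(nb) (((nb) + TX_PAGE_SIZE - 1) / TX_PAGE_SIZE)

int main(void)
{
        unsigned int credits = 3;    /* pages currently available */
        unsigned int nb = 6000;      /* frame size in bytes */
        unsigned int npages = BYTES_TO_PAGES(nb);

        if (npages <= credits) {
                credits -= npages;   /* iwm_tx_credit_dec() */
                printf("sent: %u pages, %u credits left\n", npages, credits);
        } else {
                printf("blocked: need %u pages, have %u\n", npages, credits);
        }
        return 0;
}
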
/drivers/gpu/ion/

ion_system_heap.c
256 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; [local]
257 struct page **pages = vmalloc(sizeof(struct page *) * npages);
271 BUG_ON(i >= npages);
276 vaddr = vmap(pages, npages, VM_MAP, pgprot);
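
ion_system_heap.c gathers one struct page pointer per page of the buffer and then vmap()s them into a single contiguous kernel mapping. A kernel-style sketch of that shape (not a standalone program); the sg_page iterator is a simplification of the walk ion actually performs, and PAGE_KERNEL stands in for ion's cached/uncached pgprot choice:

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

static void *map_buffer_pages(struct sg_table *sgt, size_t size)
{
        int npages = PAGE_ALIGN(size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct sg_page_iter piter;
        void *vaddr;
        int i = 0;

        if (!pages)
                return NULL;

        /* One struct page pointer per page of the buffer. */
        for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0)
                pages[i++] = sg_page_iter_page(&piter);

        vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
        vfree(pages);   /* ion frees the temporary array right after vmap() */
        return vaddr;
}
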
/drivers/gpu/drm/ttm/

ttm_page_alloc.c
70 * @npages: Number of pages in pool.
77 unsigned npages; [member of struct ttm_page_pool]
276 static void ttm_pages_put(struct page *pages[], unsigned npages) [argument]
279 if (set_pages_array_wb(pages, npages))
280 pr_err("Failed to set %d pages to wb!\n", npages);
281 for (i = 0; i < npages; ++i)
288 pool->npages -= freed_pages;
386 total += _manager->pools[i].npages;
573 && count > pool->npages) {
591 pool->npages
656 ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) [argument]
704 ttm_get_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate) [argument]
[all...]
/drivers/infiniband/hw/cxgb4/

mem.c
249 struct c4iw_mr *mhp, int shift, int npages)
254 if (npages > mhp->attr.pbl_size)
274 static int alloc_pbl(struct c4iw_mr *mhp, int npages) [argument]
277 npages << 3);
282 mhp->attr.pbl_size = npages;
289 u64 *total_size, int *npages,
326 *npages = 0;
328 *npages += (buffer_list[i].size +
331 if (!*npages)
334 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
248 reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift, int npages) [argument]
287 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) [argument]
366 int npages; [local]
427 int npages; [local]
[all...]
/drivers/infiniband/hw/ipath/

ipath_user_sdma.c
277 unsigned long addr, int tlen, int npages)
284 npages, 0, 1, pages, NULL);
286 if (ret != npages) {
296 for (j = 0; j < npages; j++) {
333 const int npages = ipath_user_sdma_num_pages(iov + idx); [local]
338 npages);
357 unsigned long niov, int npages)
361 if (npages >= ARRAY_SIZE(pkt->addr))
418 int npages = 0; [local]
494 npages
275 ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, struct ipath_user_sdma_pkt *pkt, unsigned long addr, int tlen, int npages) [argument]
353 ipath_user_sdma_init_payload(const struct ipath_devdata *dd, struct ipath_user_sdma_queue *pq, struct ipath_user_sdma_pkt *pkt, const struct iovec *iov, unsigned long niov, int npages) [argument]
[all...]
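
Both the ipath and the near-identical qib pin helpers (next section) treat a short get_user_pages() count as failure and release whatever was pinned. A kernel-style sketch, assuming the old eight-argument get_user_pages() of this kernel generation and a caller that holds mmap_sem:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

static int pin_user_range(unsigned long addr, int npages,
                          struct page **pages)
{
        int ret, j;

        /* write = 0, force = 1, as in the matched calls above. */
        ret = get_user_pages(current, current->mm, addr,
                             npages, 0, 1, pages, NULL);
        if (ret != npages) {
                /* Short (or negative) count: drop every pinned page.
                 * A negative ret simply skips the loop. */
                for (j = 0; j < ret; j++)
                        put_page(pages[j]);
                return -ENOMEM;
        }
        return 0;
}
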
/drivers/infiniband/hw/qib/

qib_user_sdma.c
281 unsigned long addr, int tlen, int npages)
288 npages, 0, 1, pages, NULL);
290 if (ret != npages) {
300 for (j = 0; j < npages; j++) {
335 const int npages = qib_user_sdma_num_pages(iov + idx); [local]
339 iov[idx].iov_len, npages);
358 unsigned long niov, int npages)
362 if (npages >= ARRAY_SIZE(pkt->addr))
420 int npages = 0; [local]
495 npages
279 qib_user_sdma_pin_pages(const struct qib_devdata *dd, struct qib_user_sdma_pkt *pkt, unsigned long addr, int tlen, int npages) [argument]
354 qib_user_sdma_init_payload(const struct qib_devdata *dd, struct qib_user_sdma_queue *pq, struct qib_user_sdma_pkt *pkt, const struct iovec *iov, unsigned long niov, int npages) [argument]
[all...]
/drivers/iommu/

amd_iommu_v2.c
502 int npages, write; [local]
507 npages = get_user_pages(fault->state->task, fault->state->mm,
510 if (npages == 1) {

Completed in 546 milliseconds
