Searched refs:page_list (Results 1 - 25 of 64) sorted by relevance

/drivers/infiniband/hw/mthca/
mthca_allocator.c
122 if (array->page_list[p].page)
123 return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
133 if (!array->page_list[p].page)
134 array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
136 if (!array->page_list[p].page)
139 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
140 ++array->page_list[p].used;
149 if (--array->page_list[p].used == 0) {
150 free_page((unsigned long) array->page_list[p].page);
151 array->page_list[
[all...]
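
The mthca_allocator.c hit above is a sparse two-level array: each page_list entry owns one page of pointers that is allocated lazily on the first store (lines 133-139) and released when its used count drops back to zero (lines 149-151). A minimal userspace sketch of that pattern, with calloc/free standing in for get_zeroed_page/free_page and a made-up OBJS_PER_PAGE constant in place of MTHCA_ARRAY_MASK + 1:

#include <stdlib.h>

#define OBJS_PER_PAGE 512		/* stand-in for MTHCA_ARRAY_MASK + 1 */
#define ARRAY_MASK    (OBJS_PER_PAGE - 1)

struct array_page { void **page; int used; };
struct sparse_array { struct array_page *page_list; };

/* Look up slot 'index'; NULL if its backing page was never allocated. */
void *array_get(struct sparse_array *a, int index)
{
	int p = index / OBJS_PER_PAGE;

	return a->page_list[p].page ?
	       a->page_list[p].page[index & ARRAY_MASK] : NULL;
}

/* Store 'value' at 'index', allocating the backing page on first use. */
int array_set(struct sparse_array *a, int index, void *value)
{
	int p = index / OBJS_PER_PAGE;

	if (!a->page_list[p].page)
		a->page_list[p].page = calloc(OBJS_PER_PAGE, sizeof(void *));
	if (!a->page_list[p].page)
		return -1;

	a->page_list[p].page[index & ARRAY_MASK] = value;
	++a->page_list[p].used;
	return 0;
}

/* Clear one slot; release the whole page once nothing on it is in use. */
void array_clear(struct sparse_array *a, int index)
{
	int p = index / OBJS_PER_PAGE;

	if (--a->page_list[p].used == 0) {
		free(a->page_list[p].page);
		a->page_list[p].page = NULL;
	} else {
		a->page_list[p].page[index & ARRAY_MASK] = NULL;
	}
}
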
mthca_eq.c
231 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
484 if (!eq->page_list)
488 eq->page_list[i].buf = NULL;
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
502 if (!eq->page_list[i].buf)
506 dma_unmap_addr_set(&eq->page_list[i], mapping, t);
508 clear_page(eq->page_list[i].buf);
572 if (eq->page_list[
[all...]
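
get_eqe() at line 231 shows the usual addressing trick for a queue built on a page_list of discontiguous pages: split the byte offset into a page index (off / PAGE_SIZE) and an in-page offset (off % PAGE_SIZE). A small hedged sketch of that lookup, with plain userspace types and PAGE_SIZE assumed to be 4096:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096

struct eq_page { void *buf; };			/* one DMA-coherent page */
struct eq { struct eq_page *page_list; size_t entry_size; };

/* Return a pointer to entry 'entry' of a queue whose storage is a list of
 * discontiguous pages, mirroring the off / PAGE_SIZE + off % PAGE_SIZE math. */
void *eq_entry(struct eq *eq, unsigned int entry)
{
	size_t off = (size_t)entry * eq->entry_size;

	return (uint8_t *)eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
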
mthca_provider.h
54 struct mthca_buf_list *page_list; member in union:mthca_buf
114 struct mthca_buf_list *page_list; member in struct:mthca_eq
/drivers/net/ethernet/mellanox/mlx5/core/
alloc.c
78 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
80 if (!buf->page_list)
84 buf->page_list[i].buf =
87 if (!buf->page_list[i].buf)
90 buf->page_list[i].map = t;
99 pages[i] = virt_to_page(buf->page_list[i].buf);
128 if (buf->page_list[i].buf)
130 buf->page_list[i].buf,
131 buf->page_list[
[all...]
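
The mlx5 alloc.c hit (and the near-identical mlx4 one further down) allocates an array of page descriptors with kcalloc, then fills each slot from dma_alloc_coherent, unwinding everything already allocated if any single page fails. A compressed userspace sketch of that allocate-or-unwind loop; aligned_alloc stands in for dma_alloc_coherent and the struct names are invented:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct buf_page { void *buf; };
struct frag_buf { struct buf_page *page_list; int nbufs; };

void frag_buf_free(struct frag_buf *buf)
{
	for (int i = 0; i < buf->nbufs; i++)
		free(buf->page_list[i].buf);	/* free(NULL) is a no-op */
	free(buf->page_list);
}

int frag_buf_alloc(struct frag_buf *buf, int nbufs)
{
	buf->nbufs = nbufs;
	buf->page_list = calloc(nbufs, sizeof(*buf->page_list));
	if (!buf->page_list)
		return -1;

	for (int i = 0; i < nbufs; i++) {
		buf->page_list[i].buf = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!buf->page_list[i].buf) {
			frag_buf_free(buf);	/* unwind the partial allocation */
			return -1;
		}
		memset(buf->page_list[i].buf, 0, PAGE_SIZE);
	}
	return 0;
}
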
/drivers/infiniband/core/
fmr_pool.c
115 u64 *page_list,
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
130 !memcmp(page_list, fmr->page_list,
131 page_list_len * sizeof *page_list))
428 * @page_list:List of pages to map
429 * @list_len:Number of pages in @page_list
435 u64 *page_list,
449 page_list,
474 result = ib_map_phys_fmr(fmr->fmr, page_list, list_le
114 ib_fmr_cache_lookup(struct ib_fmr_pool *pool, u64 *page_list, int page_list_len, u64 io_virtual_address) argument
434 ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, u64 io_virtual_address) argument
[all...]
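
ib_fmr_cache_lookup() above picks a bucket by hashing only the first entry of the candidate page_list (line 125) and then confirms a hit with a full memcmp of the whole list (lines 130-131), so two mappings only match when every page and the I/O virtual address agree. A hedged sketch of that lookup with a trivial hash and an invented cached_fmr struct:

#include <stdint.h>
#include <string.h>

#define FMR_HASH_BUCKETS 128

struct cached_fmr {
	struct cached_fmr *next;
	int page_list_len;
	uint64_t io_virtual_address;
	uint64_t page_list[];		/* pages backing this mapping */
};

/* Bucket choice depends only on the first page, like ib_fmr_hash(*page_list). */
unsigned int fmr_hash(uint64_t first_page)
{
	return (first_page >> 12) % FMR_HASH_BUCKETS;
}

struct cached_fmr *fmr_cache_lookup(struct cached_fmr **buckets,
				    uint64_t *page_list, int len, uint64_t iova)
{
	struct cached_fmr *fmr;

	for (fmr = buckets[fmr_hash(*page_list)]; fmr; fmr = fmr->next) {
		if (fmr->page_list_len == len &&
		    fmr->io_virtual_address == iova &&
		    !memcmp(page_list, fmr->page_list, len * sizeof(*page_list)))
			return fmr;	/* identical mapping already cached */
	}
	return NULL;
}
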
umem.c
82 struct page **page_list; local
121 page_list = (struct page **) __get_free_page(GFP_KERNEL);
122 if (!page_list) {
165 1, !umem->writable, page_list, vma_list);
178 sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
210 free_page((unsigned long) page_list);
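
ib_umem_get() in umem.c grabs one free page (line 121) and reuses it as scratch space for page pointers, so each get_user_pages() call can pin at most PAGE_SIZE / sizeof(struct page *) pages before the batch is copied into the scatterlist. A tiny sketch of just that batching arithmetic; the pinning itself is only printed, not performed:

#include <stdio.h>

#define PAGE_SIZE 4096
/* One scratch page holds this many struct page pointers. */
#define BATCH (PAGE_SIZE / sizeof(void *))

void pin_in_batches(unsigned long npages)
{
	unsigned long done = 0;

	while (done < npages) {
		unsigned long chunk = npages - done;

		if (chunk > BATCH)
			chunk = BATCH;	/* cap at one scratch page's worth */
		printf("pin %lu pages starting at page %lu\n", chunk, done);
		done += chunk;
	}
}
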
/drivers/net/ethernet/mellanox/mlx4/
alloc.c
202 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
204 if (!buf->page_list)
208 buf->page_list[i].buf =
211 if (!buf->page_list[i].buf)
214 buf->page_list[i].map = t;
216 memset(buf->page_list[i].buf, 0, PAGE_SIZE);
225 pages[i] = virt_to_page(buf->page_list[i].buf);
254 if (buf->page_list[i].buf)
256 buf->page_list[
[all...]
mr.c
703 int start_index, int npages, u64 *page_list)
720 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
729 int start_index, int npages, u64 *page_list)
744 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
749 page_list += chunk;
757 int start_index, int npages, u64 *page_list)
780 inbox[i + 2] = cpu_to_be64(page_list[i] |
790 page_list += chunk;
796 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
803 u64 *page_list; local
702 mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
728 __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
756 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) argument
980 mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova) argument
1007 mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey) argument
[all...]
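
The mlx4 mr.c hits write MTT entries in fixed-size chunks: each pass converts a slice of the page_list to big-endian entries tagged with MLX4_MTT_FLAG_PRESENT, then advances start_index and the page_list pointer (lines 744-749 and 780-790). A userspace sketch of the chunking loop; the chunk size and write_chunk() helper are assumptions, and the byte-swap to big-endian is left out for brevity:

#include <stdint.h>

#define MTT_FLAG_PRESENT 1ULL
#define MAX_CHUNK        64	/* entries written per pass, an assumption */

/* Convert one chunk of page addresses into "present" MTT entries. */
void write_chunk(uint64_t *mtts, int start_index, int n,
		 const uint64_t *page_list)
{
	for (int i = 0; i < n; i++)
		mtts[start_index + i] = page_list[i] | MTT_FLAG_PRESENT;
}

/* Walk the whole page_list in MAX_CHUNK-sized pieces, like __mlx4_write_mtt(). */
void write_mtt(uint64_t *mtts, int start_index, int npages,
	       const uint64_t *page_list)
{
	while (npages > 0) {
		int chunk = npages < MAX_CHUNK ? npages : MAX_CHUNK;

		write_chunk(mtts, start_index, chunk, page_list);
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;
	}
}
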
en_resources.c
93 pages[i] = virt_to_page(buf->page_list[i].buf);
eq.c
116 return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
906 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
908 if (!eq->page_list)
912 eq->page_list[i].buf = NULL;
924 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
926 if (!eq->page_list[i].buf)
930 eq->page_list[i].map = t;
932 memset(eq->page_list[i].buf, 0, PAGE_SIZE);
984 if (eq->page_list[
[all...]
/drivers/infiniband/hw/usnic/
usnic_uiom.c
52 ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
53 ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
54 (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
88 for_each_sg(chunk->page_list, sg, chunk->nents, i) {
103 struct page **page_list; local
125 page_list = (struct page **) __get_free_page(GFP_KERNEL);
126 if (!page_list)
150 1, !writable, page_list, NULL);
169 sg_init_table(chunk->page_list, chunk->nents);
170 for_each_sg(chunk->page_list, s
[all...]
usnic_uiom.h
64 struct scatterlist page_list[0]; member in struct:usnic_uiom_chunk
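
The USNIC_UIOM_PAGES_PER_CHUNK macro at usnic_uiom.c lines 52-54 sizes a chunk so the header plus its flexible page_list[] array of scatterlist entries fills exactly one page: (PAGE_SIZE - offsetof(chunk, page_list)) divided by the per-element stride. A self-contained sketch of the same calculation with plain userspace stand-in types:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

struct fake_sg { void *page; unsigned int len; };	/* stand-in for scatterlist */

struct uiom_chunk {
	struct uiom_chunk *next;
	int nents;
	struct fake_sg page_list[];	/* flexible array, as in usnic_uiom.h */
};

/* How many page_list entries fit after the header within one page. */
#define PAGES_PER_CHUNK \
	((PAGE_SIZE - offsetof(struct uiom_chunk, page_list)) / \
	 sizeof(struct fake_sg))

int main(void)
{
	printf("entries per one-page chunk: %zu\n", (size_t)PAGES_PER_CHUNK);
	return 0;
}
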
/drivers/md/
dm-kcopyd.c
41 struct page_list *pages;
72 static struct page_list zero_page_list;
193 static struct page_list *alloc_pl(gfp_t gfp)
195 struct page_list *pl;
210 static void free_pl(struct page_list *pl)
220 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
222 struct page_list *next;
240 unsigned int nr, struct page_list **pages)
242 struct page_list *pl;
271 static void drop_pages(struct page_list *p
[all...]
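
In the device-mapper files page_list is not an array but dm's own singly linked list of pages (struct page_list { struct page_list *next; struct page *page; }), and kcopyd keeps freed entries on a per-client free list. A hedged userspace sketch of the put-back-on-the-free-list step, with a void pointer standing in for struct page:

#include <stdlib.h>

struct page_list {
	struct page_list *next;
	void *page;			/* stands in for struct page * */
};

struct kcopyd_client {
	struct page_list *pages;	/* free list */
	unsigned int nr_free_pages;
};

/* Return a chain of page_list entries to the client's free list,
 * roughly mirroring kcopyd_put_pages(). */
void put_pages(struct kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		pl->next = kc->pages;
		kc->pages = pl;
		kc->nr_free_pages++;
		pl = next;
	}
}
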
dm-io.c
178 struct page_list *pl = (struct page_list *) dp->context_ptr;
187 struct page_list *pl = (struct page_list *) dp->context_ptr;
192 static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
/drivers/misc/genwqe/
card_utils.c
249 struct page **page_list, int num_pages,
260 daddr = pci_map_page(pci_dev, page_list[i],
519 static int free_user_pages(struct page **page_list, unsigned int nr_pages, argument
525 if (page_list[i] != NULL) {
527 set_page_dirty_lock(page_list[i]);
528 put_page(page_list[i]);
549 * page_list and pci_alloc_consistent for the sg_list.
551 * be fixed with some effort. The page_list must be split into
571 /* determine space needed for page_list. */
576 m->page_list
248 genwqe_map_pages(struct genwqe_dev *cd, struct page **page_list, int num_pages, dma_addr_t *dma_list) argument
[all...]
/drivers/infiniband/hw/cxgb3/
iwch_mem.c
140 __be64 **page_list)
184 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
185 if (!*page_list)
193 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
134 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) argument
iwch_provider.c
478 __be64 *page_list; local
510 &total_size, &npages, &shift, &page_list);
516 kfree(page_list);
520 ret = iwch_write_pbl(mhp, page_list, npages, 0);
521 kfree(page_list);
560 __be64 *page_list = NULL; local
590 &shift, &page_list);
596 kfree(page_list);
840 struct ib_fast_reg_page_list *page_list; local
842 page_list
853 iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list) argument
[all...]
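
build_phys_page_list() in the cxgb3 hit (cxgb4's mem.c below is nearly identical) flattens a list of physical buffers into one kmalloc'ed array of big-endian page addresses, emitting one entry per page-sized step through each buffer (lines 184-193). A rough userspace sketch under those assumptions; htobe64 from <endian.h> stands in for cpu_to_be64 and the page shift is taken as a given:

#include <endian.h>
#include <stdint.h>
#include <stdlib.h>

struct phys_buf { uint64_t addr; uint64_t size; };

/* Flatten physical buffers into one big-endian page-address array.
 * 'page_shift' is assumed to already be chosen (e.g. 12 for 4 KiB pages). */
int build_page_list(const struct phys_buf *bufs, int nbufs, int page_shift,
		    uint64_t **page_list, int *npages)
{
	uint64_t page_size = 1ULL << page_shift;
	int total = 0, n = 0;

	for (int i = 0; i < nbufs; i++)
		total += (bufs[i].size + page_size - 1) >> page_shift;

	*page_list = malloc(total * sizeof(**page_list));
	if (!*page_list)
		return -1;

	for (int i = 0; i < nbufs; i++)
		for (uint64_t off = 0; off < bufs[i].size; off += page_size)
			(*page_list)[n++] = htobe64(bufs[i].addr + off);

	*npages = n;
	return 0;
}
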
/drivers/infiniband/hw/mlx4/
mr.c
401 mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
402 if (!mfrpl->ibfrpl.page_list)
416 kfree(mfrpl->ibfrpl.page_list);
421 void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) argument
423 struct mlx4_ib_dev *dev = to_mdev(page_list->device);
424 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
425 int size = page_list->max_page_list_len * sizeof (u64);
429 kfree(mfrpl->ibfrpl.page_list);
467 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, argument
473 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npage
[all...]
/drivers/infiniband/hw/qib/
qib_keys.c
346 u64 *page_list; local
371 page_list = wr->wr.fast_reg.page_list->page_list;
375 mr->map[m]->segs[n].vaddr = (void *) page_list[i];
qib_mr.c
354 pl->page_list = kzalloc(size, GFP_KERNEL);
355 if (!pl->page_list)
367 kfree(pl->page_list);
429 * @page_list: the list of pages to associate with the fast memory region
436 int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, argument
463 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
/drivers/gpu/drm/ttm/
ttm_page_alloc_dma.c
123 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
129 struct list_head page_list; member in struct:dma_page
389 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
390 list_del(&d_page->page_list);
402 list_del(&d_page->page_list);
448 page_list) {
453 list_move(&dma_p->page_list, &d_pages);
686 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
690 list_del(&d_page->page_list);
[all...]
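
Here page_list is an embedded struct list_head: every dma_page links itself into its pool's list, and teardown walks the list with list_for_each_entry_safe() so entries can be unlinked mid-iteration (lines 389-390). A self-contained sketch of that intrusive-list pattern with a minimal list implementation in place of the kernel's <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive list, the same shape as the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

void list_init(struct list_head *h) { h->next = h->prev = h; }

void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* A pool page keeps its membership link embedded, as in ttm's struct dma_page. */
struct dma_page {
	struct list_head page_list;	/* link into the pool's page list */
	void *vaddr;
};

int main(void)
{
	struct list_head pool;
	struct dma_page a = { .vaddr = "a" }, b = { .vaddr = "b" };
	struct list_head *pos, *tmp;

	list_init(&pool);
	list_add_tail(&a.page_list, &pool);
	list_add_tail(&b.page_list, &pool);

	/* Safe traversal: grab 'next' before unlinking, like list_for_each_entry_safe(). */
	for (pos = pool.next; pos != &pool; pos = tmp) {
		struct dma_page *d = container_of(pos, struct dma_page, page_list);

		tmp = pos->next;
		list_del(pos);
		printf("freed page %s\n", (char *)d->vaddr);
	}
	return 0;
}
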
/drivers/staging/comedi/
comedi_buf.c
38 if (bm->page_list) {
40 buf = &bm->page_list[i];
54 vfree(bm->page_list);
111 bm->page_list = vzalloc(sizeof(*buf) * n_pages);
112 if (bm->page_list)
119 buf = &bm->page_list[i];
/drivers/infiniband/hw/cxgb4/
mem.c
428 int *shift, __be64 **page_list)
472 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
473 if (!*page_list)
481 (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
501 __be64 *page_list = NULL; local
534 &shift, &page_list);
540 kfree(page_list);
562 __be64 *page_list; local
595 &page_list);
601 kfree(page_list);
425 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) argument
[all...]
/drivers/infiniband/hw/ehca/
ehca_mrmw.h
107 u64 *page_list,
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.h
97 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);

Completed in 423 milliseconds
