/drivers/dma/

iovlock.c
    71: /* single kmalloc for pinned list, page_list[], and the page arrays */
    79: pages = (struct page **) &local_list->page_list[nr_iovecs];
    84: struct dma_page_list *page_list = &local_list->page_list[i];  [local]
    91: page_list->nr_pages = num_pages_spanned(&iov[i]);
    92: page_list->base_address = iov[i].iov_base;
    94: page_list->pages = pages;
    95: pages += page_list->nr_pages;
   103: page_list->nr_pages,
   106: page_list
   132: struct dma_page_list *page_list = &pinned_list->page_list[i];  [local]
   165: struct dma_page_list *page_list;  [local]
   234: struct dma_page_list *page_list;  [local]
   ...
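
The iovlock.c hits show a single-allocation layout: one kmalloc() holds the pinned-list header, the per-iovec page_list[] entries, and the flat array of struct page pointers that those entries carve up, with the pointer array starting right after page_list[nr_iovecs]. A minimal user-space sketch of that carving, where pinned_list, dma_page_list and alloc_pinned() are simplified stand-ins for illustration, not the kernel's types:

    #include <stdlib.h>

    struct page;                            /* opaque stand-in for struct page */

    struct dma_page_list {
        struct page **pages;                /* slice of the shared pointer array */
        int nr_pages;
    };

    struct pinned_list {
        int nr_iovecs;
        struct dma_page_list page_list[];   /* one entry per iovec */
    };

    /* One allocation for the header, page_list[], and every page pointer. */
    static struct pinned_list *alloc_pinned(int nr_iovecs, const int *pages_per_iovec)
    {
        size_t total_pages = 0;
        struct pinned_list *pl;
        struct page **pages;
        int i;

        for (i = 0; i < nr_iovecs; i++)
            total_pages += pages_per_iovec[i];

        pl = malloc(sizeof(*pl) +
                    nr_iovecs * sizeof(struct dma_page_list) +
                    total_pages * sizeof(struct page *));
        if (!pl)
            return NULL;

        pl->nr_iovecs = nr_iovecs;

        /* The page pointer array begins right after page_list[nr_iovecs]. */
        pages = (struct page **)&pl->page_list[nr_iovecs];
        for (i = 0; i < nr_iovecs; i++) {
            pl->page_list[i].nr_pages = pages_per_iovec[i];
            pl->page_list[i].pages = pages;
            pages += pages_per_iovec[i];
        }
        return pl;
    }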

/drivers/infiniband/hw/mthca/

mthca_allocator.c
   122: if (array->page_list[p].page)
   123: return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
   133: if (!array->page_list[p].page)
   134: array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);
   136: if (!array->page_list[p].page)
   139: array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
   140: ++array->page_list[p].used;
   149: if (--array->page_list[p].used == 0) {
   150: free_page((unsigned long) array->page_list[p].page);
   151: array->page_list[ ...
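
The mthca_allocator.c hits outline a lazily populated two-level array: page_list[p] covers one page worth of pointer slots, each page is allocated zeroed on first use, the low index bits (MTHCA_ARRAY_MASK) select the slot, and a per-page used count lets the page be freed when its last slot is cleared. A user-space sketch of that structure; the page size, type names and the omitted bounds/allocation handling are simplifications, not the driver's actual code:

    #include <stdlib.h>

    #define SKETCH_PAGE_SIZE 4096
    #define SLOTS_PER_PAGE   (SKETCH_PAGE_SIZE / sizeof(void *))
    #define ARRAY_MASK       (SLOTS_PER_PAGE - 1)

    struct sparse_array {
        struct {
            void **page;        /* lazily allocated page of pointer slots */
            int    used;        /* slots currently set in this page */
        } *page_list;
        int npages;             /* caller sizes page_list[] up front */
    };

    /* Look up a slot; unpopulated pages read back as NULL. */
    static void *array_get(struct sparse_array *a, unsigned long index)
    {
        unsigned long p = index / SLOTS_PER_PAGE;

        if (p < (unsigned long)a->npages && a->page_list[p].page)
            return a->page_list[p].page[index & ARRAY_MASK];
        return NULL;
    }

    /* Set a slot, allocating its page (zeroed) on first use. */
    static int array_set(struct sparse_array *a, unsigned long index, void *value)
    {
        unsigned long p = index / SLOTS_PER_PAGE;   /* assumes p < a->npages */

        if (!a->page_list[p].page)
            a->page_list[p].page = calloc(1, SKETCH_PAGE_SIZE);
        if (!a->page_list[p].page)
            return -1;

        a->page_list[p].page[index & ARRAY_MASK] = value;
        ++a->page_list[p].used;
        return 0;
    }

    /* Clear a slot; free the whole page once its last slot goes away. */
    static void array_clear(struct sparse_array *a, unsigned long index)
    {
        unsigned long p = index / SLOTS_PER_PAGE;

        a->page_list[p].page[index & ARRAY_MASK] = NULL;
        if (--a->page_list[p].used == 0) {
            free(a->page_list[p].page);
            a->page_list[p].page = NULL;
        }
    }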

mthca_eq.c
   231: return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
   482: eq->page_list = kmalloc(npages * sizeof *eq->page_list,
   484: if (!eq->page_list)
   488: eq->page_list[i].buf = NULL;
   500: eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
   502: if (!eq->page_list[i].buf)
   506: dma_unmap_addr_set(&eq->page_list[i], mapping, t);
   508: clear_page(eq->page_list[i].buf);
   572: if (eq->page_list[ ...

mthca_provider.h
    54: struct mthca_buf_list *page_list;  [member of union mthca_buf]
   114: struct mthca_buf_list *page_list;  [member of struct mthca_eq]

mthca_mr.c
   689: static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,  [argument]
   703: /* Trust the user not to pass misaligned data in page_list */
   706: if (page_list[i] & ~page_mask)
   717: int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,  [argument]
   726: err = mthca_check_fmr(fmr, page_list, list_len, iova);
   739: __be64 mtt_entry = cpu_to_be64(page_list[i] |
   758: int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,  [argument]
   766: err = mthca_check_fmr(fmr, page_list, list_len, iova);
   787: fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |

/drivers/infiniband/core/

umem.c
    46: ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
    47: ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
    48: (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
    56: ib_dma_unmap_sg(dev, chunk->page_list,
    59: struct page *page = sg_page(&chunk->page_list[i]);
    82: struct page **page_list;  [local]
   122: page_list = (struct page **) __get_free_page(GFP_KERNEL);
   123: if (!page_list) {
   155: 1, !umem->writable, page_list, vma_list);
   175: sg_init_table(chunk->page_list, chun ...
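
The umem.c macro at lines 46-48 is a sizing calculation: it takes PAGE_SIZE, subtracts the chunk header (offsetof the page_list member), and divides by the size of one page_list element (computed as the distance between &page_list[1] and &page_list[0]) to learn how many scatterlist entries fit in a page-sized chunk. The model below redoes that arithmetic with sizeof instead of the pointer-difference trick; struct umem_chunk and sg_entry here are placeholders, not the real struct ib_umem_chunk:

    #include <stdio.h>
    #include <stddef.h>

    #define SKETCH_PAGE_SIZE 4096

    /* stand-in for a scatterlist entry */
    struct sg_entry { void *page; unsigned int length, offset; };

    struct umem_chunk {
        struct umem_chunk *next;
        int nents;
        struct sg_entry page_list[];        /* as many entries as fit in one page */
    };

    #define MAX_PAGES_PER_CHUNK                                        \
        ((SKETCH_PAGE_SIZE - offsetof(struct umem_chunk, page_list)) / \
         sizeof(((struct umem_chunk *)0)->page_list[0]))

    int main(void)
    {
        /* How many page_list entries a page-sized chunk can carry. */
        printf("entries per chunk: %zu\n", (size_t)MAX_PAGES_PER_CHUNK);
        return 0;
    }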

fmr_pool.c
   115: u64 *page_list,
   126: bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
   131: !memcmp(page_list, fmr->page_list,
   132: page_list_len * sizeof *page_list))
   429: * @page_list:List of pages to map
   430: * @list_len:Number of pages in @page_list
   436: u64 *page_list,
   450: page_list,
   475: result = ib_map_phys_fmr(fmr->fmr, page_list, list_le ...
   114: ib_fmr_cache_lookup(struct ib_fmr_pool *pool, u64 *page_list, int page_list_len, u64 io_virtual_address)  [argument]
   435: ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, u64 *page_list, int list_len, u64 io_virtual_address)  [argument]
   ...
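
The fmr_pool.c hits sketch the FMR cache lookup: the bucket is chosen by hashing only the first page address, and a cache hit additionally requires the mapped address and the entire page list to match byte for byte via memcmp. A simplified user-space version of that lookup; the bucket count, hash function and cached_fmr layout are illustrative placeholders rather than the pool's real data structures:

    #include <stdint.h>
    #include <string.h>

    #define NUM_BUCKETS 256

    struct cached_fmr {
        struct cached_fmr *next;        /* bucket chain */
        uint64_t iova;
        int page_list_len;
        uint64_t page_list[];           /* pages backing this mapping */
    };

    /* Page-granular hash of the first page only. */
    static unsigned int fmr_hash(uint64_t first_page)
    {
        return (unsigned int)(first_page >> 12) % NUM_BUCKETS;
    }

    static struct cached_fmr *fmr_cache_lookup(struct cached_fmr **buckets,
                                               const uint64_t *page_list,
                                               int page_list_len, uint64_t iova)
    {
        struct cached_fmr *fmr;

        /* Bucket is chosen from the first page... */
        for (fmr = buckets[fmr_hash(page_list[0])]; fmr; fmr = fmr->next) {
            /* ...and a hit requires the full page list (and iova/length) to match. */
            if (fmr->iova == iova &&
                fmr->page_list_len == page_list_len &&
                !memcmp(page_list, fmr->page_list,
                        page_list_len * sizeof(*page_list)))
                return fmr;
        }
        return NULL;
    }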

verbs.c
  1077: struct ib_fast_reg_page_list *page_list;  [local]
  1082: page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);
  1084: if (!IS_ERR(page_list)) {
  1085: page_list->device = device;
  1086: page_list->max_page_list_len = max_page_list_len;
  1089: return page_list;
  1093: void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)  [argument]
  1095: page_list->device->free_fast_reg_page_list(page_list);
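
The verbs.c hits show the core/driver split: the core wrapper dispatches allocation through the device's ops table, fills in the fields every driver shares (the back-pointer and the maximum list length), and frees the object back through whichever device allocated it. A user-space sketch of that dispatch pattern; the structure names and hooks are assumptions for illustration, and a plain NULL check replaces the kernel's IS_ERR() convention:

    #include <stdlib.h>

    struct fast_reg_page_list;

    /* Per-driver operations table; the hook names here are illustrative. */
    struct device {
        struct fast_reg_page_list *(*alloc_page_list)(struct device *dev, int max_len);
        void (*free_page_list)(struct fast_reg_page_list *pl);
    };

    struct fast_reg_page_list {
        struct device *device;              /* which driver owns this object */
        unsigned long *page_list;
        unsigned int max_page_list_len;
    };

    /* Core wrapper: dispatch to the driver, then fill in the common fields. */
    static struct fast_reg_page_list *alloc_fast_reg_page_list(struct device *dev,
                                                               int max_len)
    {
        struct fast_reg_page_list *pl = dev->alloc_page_list(dev, max_len);

        if (pl) {
            pl->device = dev;
            pl->max_page_list_len = max_len;
        }
        return pl;
    }

    /* Free through whichever driver allocated it. */
    static void free_fast_reg_page_list(struct fast_reg_page_list *pl)
    {
        pl->device->free_page_list(pl);
    }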

/drivers/infiniband/hw/mlx4/

mr.c
    97: len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
    99: pages[i++] = sg_dma_address(&chunk->page_list[j]) +
   236: mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
   237: if (!mfrpl->ibfrpl.page_list)
   251: kfree(mfrpl->ibfrpl.page_list);
   256: void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)  [argument]
   258: struct mlx4_ib_dev *dev = to_mdev(page_list->device);
   259: struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
   260: int size = page_list->max_page_list_len * sizeof (u64);
   264: kfree(mfrpl->ibfrpl.page_list);
   302: mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages, u64 iova)  [argument]
   ...

doorbell.c
    77: db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);

/drivers/net/ethernet/mellanox/mlx4/

alloc.c
   199: buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
   201: if (!buf->page_list)
   205: buf->page_list[i].buf =
   208: if (!buf->page_list[i].buf)
   211: buf->page_list[i].map = t;
   213: memset(buf->page_list[i].buf, 0, PAGE_SIZE);
   222: pages[i] = virt_to_page(buf->page_list[i].buf);
   251: if (buf->page_list[i].buf)
   253: buf->page_list[ ...

mr.c
   530: int start_index, int npages, u64 *page_list)
   547: mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
   556: int start_index, int npages, u64 *page_list)
   571: err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
   576: page_list += chunk;
   584: int start_index, int npages, u64 *page_list)
   607: inbox[i + 2] = cpu_to_be64(page_list[i] |
   617: page_list += chunk;
   623: return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
   630: u64 *page_list;  [local]
   529: mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)  [argument]
   555: __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)  [argument]
   583: mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list)  [argument]
   713: mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova)  [argument]
   740: mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey)  [argument]
   ...
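
The core mlx4 mr.c hits show MTT entries being written in bounded chunks: each page address in page_list is OR'd with a "present" flag, converted to big-endian, and both the list pointer and the start index advance by the chunk size until npages is exhausted. A user-space sketch of that loop; MAX_CHUNK, the flag value and htobe64() (standing in for the kernel's cpu_to_be64()) are assumptions for illustration:

    #include <stdint.h>
    #include <endian.h>     /* htobe64(); stands in for cpu_to_be64() */

    #define MTT_FLAG_PRESENT 1ULL
    #define MAX_CHUNK        64     /* placeholder for the per-command entry limit */

    /* Fill one command's worth of big-endian MTT entries. */
    static void write_mtt_chunk(uint64_t *mtts, const uint64_t *page_list, int npages)
    {
        for (int i = 0; i < npages; i++)
            mtts[i] = htobe64(page_list[i] | MTT_FLAG_PRESENT);
    }

    /* Walk the whole page list, MAX_CHUNK entries at a time. */
    static void write_mtt(uint64_t *mtts, int start_index,
                          const uint64_t *page_list, int npages)
    {
        while (npages > 0) {
            int chunk = npages < MAX_CHUNK ? npages : MAX_CHUNK;

            write_mtt_chunk(mtts + start_index, page_list, chunk);
            start_index += chunk;
            page_list   += chunk;
            npages      -= chunk;
        }
    }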

en_resources.c
    80: pages[i] = virt_to_page(buf->page_list[i].buf);

eq.c
    97: return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
   639: eq->page_list = kmalloc(npages * sizeof *eq->page_list,
   641: if (!eq->page_list)
   645: eq->page_list[i].buf = NULL;
   657: eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
   659: if (!eq->page_list[i].buf)
   663: eq->page_list[i].map = t;
   665: memset(eq->page_list[i].buf, 0, PAGE_SIZE);
   718: if (eq->page_list[ ...
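
The eq.c hits (here and in mthca_eq.c above) use the same buffer layout: the event queue is allocated one page at a time into page_list[], and the entry at byte offset off lives at page_list[off / PAGE_SIZE].buf plus off % PAGE_SIZE. A user-space model of that indexing; plain calloc() stands in for dma_alloc_coherent(), and the structures and error unwinding are simplified:

    #include <stdlib.h>

    #define SKETCH_PAGE_SIZE 4096

    /* stand-in for the per-page buffer plus its DMA mapping */
    struct buf_list { void *buf; };

    struct eq {
        struct buf_list *page_list;
        int nent;                   /* number of entries */
        int entry_size;
    };

    /* Allocate the queue one page at a time; real code uses dma_alloc_coherent(). */
    static int eq_alloc(struct eq *eq, int nent, int entry_size)
    {
        int npages = (nent * entry_size + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;

        eq->page_list = calloc(npages, sizeof(*eq->page_list));
        if (!eq->page_list)
            return -1;

        for (int i = 0; i < npages; i++) {
            eq->page_list[i].buf = calloc(1, SKETCH_PAGE_SIZE);
            if (!eq->page_list[i].buf)
                return -1;          /* a real driver unwinds the earlier pages */
        }
        eq->nent = nent;
        eq->entry_size = entry_size;
        return 0;
    }

    /* Find entry 'index': pick the page, then the offset within it. */
    static void *eq_get_entry(struct eq *eq, int index)
    {
        unsigned long off = (unsigned long)index * eq->entry_size;

        return (char *)eq->page_list[off / SKETCH_PAGE_SIZE].buf
               + off % SKETCH_PAGE_SIZE;
    }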

/drivers/md/

dm-kcopyd.c
    40: struct page_list *pages;
    69: static struct page_list zero_page_list;
    79: static struct page_list *alloc_pl(gfp_t gfp)
    81: struct page_list *pl;
    96: static void free_pl(struct page_list *pl)
   106: static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
   108: struct page_list *next;
   126: unsigned int nr, struct page_list **pages)
   128: struct page_list *pl;
   157: static void drop_pages(struct page_list *p ...
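
In dm-kcopyd.c, struct page_list is a singly linked chain of pages that the client allocates in batches and returns to a per-client free list. A minimal user-space sketch of that chain handling; the node contents, client structure and fixed 4096-byte page are stand-ins, not dm-kcopyd's definitions:

    #include <stdlib.h>

    struct page_list {
        struct page_list *next;
        void *page;                     /* stand-in for struct page * */
    };

    struct client {
        struct page_list *free_pages;   /* per-client free list */
        unsigned int nr_free_pages;
    };

    static struct page_list *alloc_pl(void)
    {
        struct page_list *pl = malloc(sizeof(*pl));

        if (!pl)
            return NULL;
        pl->page = malloc(4096);        /* stand-in for alloc_page() */
        if (!pl->page) {
            free(pl);
            return NULL;
        }
        pl->next = NULL;
        return pl;
    }

    static void free_pl(struct page_list *pl)
    {
        free(pl->page);
        free(pl);
    }

    /* Return a chain of pages to the client's free list, front-linking each node. */
    static void put_pages(struct client *kc, struct page_list *pl)
    {
        while (pl) {
            struct page_list *next = pl->next;

            pl->next = kc->free_pages;
            kc->free_pages = pl;
            kc->nr_free_pages++;
            pl = next;
        }
    }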

dm-io.c
   180: struct page_list *pl = (struct page_list *) dp->context_ptr;
   189: struct page_list *pl = (struct page_list *) dp->context_ptr;
   194: static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)

/drivers/infiniband/hw/cxgb3/

iwch_mem.c
   140: __be64 **page_list)
   184: *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
   185: if (!*page_list)
   193: (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
   134: build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list)  [argument]

iwch_provider.c
   477: __be64 *page_list;  [local]
   509: &total_size, &npages, &shift, &page_list);
   515: kfree(page_list);
   519: ret = iwch_write_pbl(mhp, page_list, npages, 0);
   520: kfree(page_list);
   559: __be64 *page_list = NULL;  [local]
   589: &shift, &page_list);
   595: kfree(page_list);
   665: len = sg_dma_len(&chunk->page_list[j]) >> shift;
   668: &chunk->page_list[ ...
   841: struct ib_fast_reg_page_list *page_list;  [local]
   854: iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)  [argument]
   ...

/drivers/infiniband/hw/qib/

qib_keys.c
   321: u64 *page_list;  [local]
   344: page_list = wr->wr.fast_reg.page_list->page_list;
   348: mr->map[m]->segs[n].vaddr = (void *) page_list[i];

qib_mr.c
   242: vaddr = page_address(sg_page(&chunk->page_list[i]));
   329: pl->page_list = kmalloc(size, GFP_KERNEL);
   330: if (!pl->page_list)
   342: kfree(pl->page_list);
   414: * @page_list: the list of pages to associate with the fast memory region
   421: int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,  [argument]
   447: fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];

/drivers/infiniband/hw/cxgb4/

mem.c
   290: int *shift, __be64 **page_list)
   334: *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
   335: if (!*page_list)
   343: (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
   363: __be64 *page_list = NULL;  [local]
   396: &shift, &page_list);
   402: kfree(page_list);
   424: __be64 *page_list;  [local]
   457: &page_list);
   463: kfree(page_list);
   287: build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list)  [argument]
   ...
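
Both cxgb3 and cxgb4 carry a build_phys_page_list() helper that flattens an array of physical buffers into one entry per page: it counts how many pages the buffers span, allocates a u64 array, and fills it with the page addresses. A user-space sketch of that flattening; the types, the fixed caller-supplied shift, and the omitted alignment checks of the real helper are simplifications:

    #include <stdint.h>
    #include <stdlib.h>

    /* stand-in for struct ib_phys_buf */
    struct phys_buf { uint64_t addr; uint64_t size; };

    /*
     * Flatten a list of physical buffers into one entry per page of 2^shift bytes.
     * Returns the number of entries written to *page_list (caller frees it),
     * or -1 on allocation failure.
     */
    static int build_page_list(const struct phys_buf *buf, int nbuf, int shift,
                               uint64_t **page_list)
    {
        uint64_t page_size = 1ULL << shift;
        int npages = 0, n = 0, i;
        uint64_t off;

        for (i = 0; i < nbuf; i++)
            npages += (buf[i].size + page_size - 1) >> shift;

        *page_list = malloc(npages * sizeof(uint64_t));
        if (!*page_list)
            return -1;

        /* One entry per page-sized slice of each buffer. */
        for (i = 0; i < nbuf; i++)
            for (off = 0; off < buf[i].size; off += page_size)
                (*page_list)[n++] = buf[i].addr + off;

        return n;
    }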

/drivers/gpu/drm/ttm/

ttm_page_alloc_dma.c
   122: * @page_list: The link to the 'page_list' in 'struct dma_pool'.
   128: struct list_head page_list;  [member of struct dma_page]
   388: list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
   389: list_del(&d_page->page_list);
   401: list_del(&d_page->page_list);
   446: page_list) {
   451: list_move(&dma_p->page_list, &d_pages);
   684: list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
   688: list_del(&d_page->page_list);
   ...

/drivers/infiniband/hw/ehca/

ehca_mrmw.h
   107: u64 *page_list,

/drivers/infiniband/hw/amso1100/

c2_provider.c
   337: u64 *page_list;  [local]
   370: page_list = vmalloc(sizeof(u64) * pbl_depth);
   371: if (!page_list) {
   372: pr_debug("couldn't vmalloc page_list of size %zd\n",
   384: page_list[j++] = (buffer_list[i].addr +
   390: vfree(page_list);
   400: (unsigned long long) page_list[0],
   401: (unsigned long long) page_list[pbl_depth-1]);
   402: err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
   406: vfree(page_list);
   ...

/drivers/infiniband/hw/ipath/

ipath_mr.c
   231: vaddr = page_address(sg_page(&chunk->page_list[i]));
   345: * @page_list: the list of pages to associate with the fast memory region
   352: int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,  [argument]
   376: fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];