Searched refs:num_pages (Results 1 - 25 of 72) sorted by relevance

/drivers/infiniband/hw/qib/
qib_user_pages.c
39 static void __qib_release_user_pages(struct page **p, size_t num_pages, argument
44 for (i = 0; i < num_pages; i++) {
54 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, argument
63 if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
68 for (got = 0; got < num_pages; got += ret) {
71 num_pages - got, 1, 1,
77 current->mm->pinned_vm += num_pages;
123 * @num_pages: the number of pages
128 * now, num_pages is always 1, but that will probably change at some point
132 int qib_get_user_pages(unsigned long start_page, size_t num_pages, argument
146 qib_release_user_pages(struct page **p, size_t num_pages) argument
[all...]
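
The qib matches above show the era's canonical user-page pinning pattern: refuse requests beyond RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK, loop get_user_pages() until all num_pages are pinned (it may return fewer than asked), charge mm->pinned_vm, and on release put_page() each entry. A minimal sketch of that shape against the 3.x get_user_pages() signature; the function name is illustrative, not the driver's, and like the qib helper it assumes mmap_sem is already held:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/capability.h>

/* Illustrative: pin num_pages of user memory starting at start_page into p[]. */
static int example_pin_user_pages(unsigned long start_page,
                                  size_t num_pages, struct page **p)
{
        unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        size_t got;
        int ret;

        /* Enforce the mlock rlimit for unprivileged callers. */
        if (num_pages > lock_limit && !capable(CAP_IPC_LOCK))
                return -ENOMEM;

        /* get_user_pages() may pin fewer pages than requested; iterate. */
        for (got = 0; got < num_pages; got += ret) {
                ret = get_user_pages(current, current->mm,
                                     start_page + got * PAGE_SIZE,
                                     num_pages - got, 1, 1, p + got, NULL);
                if (ret < 0)
                        goto bail_release;
        }

        current->mm->pinned_vm += num_pages;    /* account the pinned pages */
        return 0;

bail_release:
        while (got--)
                put_page(p[got]);
        return ret;
}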
/drivers/infiniband/hw/ipath/
ipath_user_pages.c
41 static void __ipath_release_user_pages(struct page **p, size_t num_pages, argument
46 for (i = 0; i < num_pages; i++) {
48 (unsigned long) num_pages, p[i]);
56 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, argument
65 if (num_pages > lock_limit) {
71 (unsigned long) num_pages, start_page);
73 for (got = 0; got < num_pages; got += ret) {
76 num_pages - got, 1, 1,
82 current->mm->pinned_vm += num_pages;
152 * @num_pages
161 ipath_get_user_pages(unsigned long start_page, size_t num_pages, struct page **p) argument
175 ipath_release_user_pages(struct page **p, size_t num_pages) argument
189 unsigned long num_pages; member in struct:ipath_user_pages_work
204 ipath_release_user_pages_on_close(struct page **p, size_t num_pages) argument
[all...]
/drivers/gpu/drm/
drm_cache.c
51 unsigned long num_pages)
56 for (i = 0; i < num_pages; i++)
69 drm_clflush_pages(struct page *pages[], unsigned long num_pages) argument
74 drm_cache_flush_clflush(pages, num_pages);
83 for (i = 0; i < num_pages; i++) {
50 drm_cache_flush_clflush(struct page *pages[], unsigned long num_pages) argument
drm_memory.c
44 unsigned long i, num_pages = local
70 /* note: use vmalloc() because num_pages could be large... */
71 page_map = vmalloc(num_pages * sizeof(struct page *));
76 for (i = 0; i < num_pages; ++i)
78 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
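The drm_memory.c comment explains the allocation choice: the page-pointer array itself can be too large for kmalloc() when num_pages is big, so it comes from vmalloc(), and the pages are then stitched into one contiguous kernel mapping with vmap(). A reduced sketch of that vmalloc-then-vmap step; identifiers are illustrative, and VM_MAP/PAGE_KERNEL stand in for the AGP-specific flags the driver passes:

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Map num_pages discontiguous pages into one contiguous kernel VA range. */
static void *example_vmap_pages(struct page **pages, unsigned long num_pages)
{
        /* vmalloc(), not kmalloc(): the pointer array itself may be huge. */
        struct page **page_map = vmalloc(num_pages * sizeof(struct page *));
        void *addr;
        unsigned long i;

        if (!page_map)
                return NULL;

        for (i = 0; i < num_pages; ++i)
                page_map[i] = pages[i];

        addr = vmap(page_map, num_pages, VM_MAP, PAGE_KERNEL);
        vfree(page_map);        /* page tables are populated; array no longer needed */
        return addr;
}
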
drm_agpsupport.c
436 unsigned long num_pages,
445 mem = agp_allocate_memory(dev->agp->bridge, num_pages,
449 num_pages);
453 for (i = 0; i < num_pages; i++)
455 mem->page_count = num_pages;
434 drm_agp_bind_pages(struct drm_device *dev, struct page **pages, unsigned long num_pages, uint32_t gtt_offset, u32 type) argument
/drivers/gpu/drm/vmwgfx/
vmwgfx_gmr.c
36 unsigned long num_pages,
42 uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
52 define_cmd.numPages = num_pages;
58 remap_cmd.numPages = num_pages;
68 for (i = 0; i < num_pages; ++i) {
110 unsigned long num_pages)
123 while (likely(num_pages != 0)) {
146 while (likely(num_pages != 0)) {
156 desc_virtual->num_pages = cpu_to_le32(1);
159 le32_to_cpu(desc_virtual->num_pages);
34 vmw_gmr2_bind(struct vmw_private *dev_priv, struct page *pages[], unsigned long num_pages, int gmr_id) argument
108 vmw_gmr_build_descriptors(struct list_head *desc_pages, struct page *pages[], unsigned long num_pages) argument
219 vmw_gmr_count_descriptors(struct page *pages[], unsigned long num_pages) argument
236 vmw_gmr_bind(struct vmw_private *dev_priv, struct page *pages[], unsigned long num_pages, int gmr_id) argument
[all...]
vmwgfx_reg.h
43 __le32 num_pages; member in struct:svga_guest_mem_descriptor
vmwgfx_gmrid_manager.c
62 gman->used_gmr_pages += bo->num_pages;
86 mem->num_pages = bo->num_pages;
96 gman->used_gmr_pages -= bo->num_pages;
110 gman->used_gmr_pages -= mem->num_pages;
vmwgfx_dmabuf.c
210 placement.lpfn = bo->num_pages;
225 bo->mem.start < bo->num_pages &&
/drivers/media/video/
videobuf2-dma-sg.c
49 buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
51 buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
55 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
57 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
62 for (i = 0; i < buf->sg_desc.num_pages; ++i) {
77 __func__, buf->sg_desc.num_pages);
96 int i = buf->sg_desc.num_pages;
100 buf->sg_desc.num_pages);
102 vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
129 buf->sg_desc.num_pages
[all...]
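
videobuf2-dma-sg derives num_pages by rounding the buffer size up to whole pages, then sizes both the scatterlist (vzalloc, since it can be large) and the page array from that count. A compact sketch of the setup under the driver's one-page-per-scatterlist-entry assumption; the struct and function names are illustrative:

#include <linux/vmalloc.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_sg_buf {
        struct scatterlist *sglist;
        struct page **pages;
        int num_pages;
};

static int example_sg_alloc(struct example_sg_buf *buf, unsigned long size)
{
        /* Round up: a partial trailing page still occupies a whole page. */
        buf->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        buf->sglist = vzalloc(buf->num_pages * sizeof(*buf->sglist));
        if (!buf->sglist)
                return -ENOMEM;
        sg_init_table(buf->sglist, buf->num_pages);

        buf->pages = kcalloc(buf->num_pages, sizeof(*buf->pages), GFP_KERNEL);
        if (!buf->pages) {
                vfree(buf->sglist);
                return -ENOMEM;
        }
        return 0;
}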
/drivers/gpu/drm/ttm/
ttm_tt.c
53 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
59 ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
120 drm_clflush_pages(ttm->pages, ttm->num_pages);
122 for (i = 0; i < ttm->num_pages; ++i) {
191 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
223 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
303 for (i = 0; i < ttm->num_pages; ++i) {
349 ttm->num_pages << PAGE_SHIFT,
360 for (i = 0; i < ttm->num_pages;
[all...]
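
ttm_tt.c sizes its pages[] and dma_address[] arrays with drm_calloc_large(), whose point is that num_pages can make the array too big for a single kmalloc slab. The idea behind the helper, sketched from its contract rather than quoted from drm_mem_util.h: overflow-check the multiplication, use kcalloc() for small arrays, and fall back to a zeroed vmalloc for large ones.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Zeroed allocation of nmemb * size that tolerates very large arrays. */
static void *example_calloc_large(size_t nmemb, size_t size)
{
        if (size != 0 && nmemb > SIZE_MAX / size)       /* multiplication overflow */
                return NULL;

        if (nmemb * size <= PAGE_SIZE)
                return kcalloc(nmemb, size, GFP_KERNEL);

        return vzalloc(nmemb * size);
}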
ttm_bo_util.c
357 add = new_mem->num_pages - 1;
360 for (i = 0; i < new_mem->num_pages; ++i) {
507 unsigned long num_pages,
522 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
540 map->virtual = vmap(ttm->pages + start_page, num_pages,
547 unsigned long start_page, unsigned long num_pages,
558 if (num_pages > bo->num_pages)
560 if (start_page > bo->num_pages)
563 if (num_pages >
505 ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) argument
546 ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) argument
[all...]
ttm_agp_backend.c
59 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
64 for (i = 0; i < ttm->num_pages; i++) {
ttm_bo_manager.c
71 mem->num_pages, mem->page_alignment,
77 node = drm_mm_get_block_atomic_range(node, mem->num_pages,
ttm_bo_vm.c
45 unsigned long num_pages)
67 if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
68 (page_start + num_pages)))
153 if (unlikely(page_offset >= bo->num_pages)) {
347 if (unlikely(kmap_offset >= bo->num_pages)) {
353 io_size = bo->num_pages - kmap_offset;
418 if (unlikely(kmap_offset >= bo->num_pages))
422 io_size = bo->num_pages - kmap_offset;
43 ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev, unsigned long page_start, unsigned long num_pages) argument
/drivers/virtio/
virtio_balloon.c
53 unsigned int num_pages; member in struct:virtio_balloon
57 * to num_pages above.
146 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
181 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
272 offsetof(struct virtio_balloon_config, num_pages),
275 return target - vb->num_pages;
280 __le32 actual = cpu_to_le32(vb->num_pages);
359 vb->num_pages = 0;
391 while (vb->num_pages)
392 leak_balloon(vb, vb->num_pages);
[all...]
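
virtio_balloon keeps num_pages in balloon-page units (VIRTIO_BALLOON_PAGES_PER_PAGE per guest page) and steers it toward the target the host writes into config space: a positive difference inflates the balloon, a negative one deflates it, and teardown leaks everything back. A schematic of that control flow with a stripped-down struct; the example_* names are stand-ins, not the driver's:

#include <linux/types.h>

struct example_balloon {
        u32 num_pages;  /* pages currently surrendered to the host */
};

/* Stub inflate/deflate; the real driver queues page frame numbers to the host. */
static void example_fill(struct example_balloon *b, u32 n) { b->num_pages += n; }
static void example_leak(struct example_balloon *b, u32 n) { b->num_pages -= n; }

/* Drive num_pages toward the host-requested target. */
static void example_balloon_adjust(struct example_balloon *b, u32 target)
{
        if (target > b->num_pages)
                example_fill(b, target - b->num_pages);   /* inflate */
        else if (target < b->num_pages)
                example_leak(b, b->num_pages - target);   /* deflate */
}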
/drivers/gpu/drm/gma500/
mmu.c
139 struct page *page[], unsigned long num_pages)
146 for (i = 0; i < num_pages; i++)
455 int num_pages = gtt_pages; local
468 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
469 psb_pages_clflush(pd->driver, &pd->p, num_pages);
549 unsigned long address, uint32_t num_pages,
565 /*ttm_tt_cache_flush(&pd->p, num_pages);*/
566 psb_pages_clflush(pd->driver, &pd->p, num_pages);
571 rows = num_pages / desired_tile_stride;
573 desired_tile_stride = num_pages;
138 psb_pages_clflush(struct psb_mmu_driver *driver, struct page *page[], unsigned long num_pages) argument
548 psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) argument
602 psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages) argument
641 psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) argument
697 psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, unsigned long address, uint32_t num_pages, int type) argument
742 psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride, int type) argument
[all...]
/drivers/scsi/be2iscsi/
be_cmds.c
538 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
612 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
672 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
769 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
813 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
827 u32 page_offset, u32 num_pages)
834 u32 temp_num_pages = num_pages;
836 if (num_pages == 0xff)
837 num_pages = 1;
848 req->num_pages
825 be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, u32 page_offset, u32 num_pages) argument
[all...]
/drivers/gpu/drm/radeon/
radeon_object.h
78 return bo->tbo.num_pages << PAGE_SHIFT;
88 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
/drivers/net/ethernet/8390/
smc-mca.c
128 unsigned char num_pages; member in struct:__anon1856
206 unsigned char reg4, num_pages; local
296 num_pages = 40;
307 num_pages = mem_table[i].num_pages;
319 num_pages = 0x40;
331 num_pages = 0x20 + (2 * (pos3 & 0x10));
384 ei_status.stop_page = num_pages;
/drivers/gpu/drm/via/
via_dmablit.h
42 unsigned long num_pages; member in struct:_drm_via_sg_info
/drivers/gpu/drm/nouveau/
nouveau_sgdma.c
44 for (i = 0; i < ttm->num_pages; i++) {
72 for (i = 0; i < ttm->num_pages; i++) {
106 u32 cnt = ttm->num_pages;
126 u32 cnt = ttm->num_pages;
149 nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
215 u32 cnt = ttm->num_pages;
254 u32 cnt = ttm->num_pages;
nouveau_bo.c
116 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
156 nvbo->bo.mem.num_pages < vram_pages / 4) {
277 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
493 u32 page_count = new_mem->num_pages;
496 page_count = new_mem->num_pages;
531 u64 length = (new_mem->num_pages << PAGE_SHIFT);
630 u32 page_count = new_mem->num_pages;
641 page_count = new_mem->num_pages;
677 ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
685 nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT
[all...]
/drivers/scsi/bfa/
bfa_fcbuild.c
671 int num_pages = 0; local
677 num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
680 num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
682 return num_pages;
687 u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
693 memset(tprlo_acc, 0, (num_pages * 16) + 4);
697 tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
699 for (page = 0; page < num_pages; page++) {
711 u32 s_id, __be16 ox_id, int num_pages)
717 memset(prlo_acc, 0, (num_pages * 1
686 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages) argument
710 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, u32 s_id, __be16 ox_id, int num_pages) argument
882 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int num_pages) argument
909 int num_pages = 0; local
940 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) argument
973 int num_pages = 0; local
[all...]
/drivers/virt/
fsl_hypervisor.c
154 unsigned int num_pages; local
217 num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
225 pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
235 sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) +
247 param.local_vaddr - lb_offset, num_pages,
252 if (num_pinned != num_pages) {
275 for (i = 1; i < num_pages; i++) {
292 virt_to_phys(sg_list), num_pages);
296 for (i = 0; i < num_pages; i++)
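
fsl_hypervisor.c computes num_pages for a user buffer that need not start on a page boundary: the in-page offset is added to the byte count before rounding up, because an unaligned buffer can straddle one extra page. The arithmetic, isolated (function name illustrative):

#include <linux/mm.h>

/* Pages spanned by count bytes starting at user virtual address vaddr. */
static unsigned int example_pages_spanned(unsigned long vaddr, size_t count)
{
        unsigned long offset = vaddr & (PAGE_SIZE - 1); /* in-page offset */

        /* e.g. count == PAGE_SIZE at offset 1 spans two pages */
        return (count + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
}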
