/drivers/staging/lustre/lustre/include/linux/
  lustre_patchless_compat.h
    47: #define ll_delete_from_page_cache(page) delete_from_page_cache(page)
    50: truncate_complete_page(struct address_space *mapping, struct page *page)
    52: if (page->mapping != mapping)
    55: if (PagePrivate(page))
    56: page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
    58: cancel_dirty_page(page, PAGE_SIZE);
    59: ClearPageMappedToDisk(page);
    ...
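The Lustre compat shim above compresses a standard page-cache eviction sequence: confirm the page still belongs to the mapping, let the owner invalidate any attached private data, then cancel dirty state and clear the mapped-to-disk flag. A minimal userspace sketch of that ordering, with illustrative types that are stand-ins for the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct mapping;
    struct cache_page {
        struct mapping *mapping;    /* current owner; may change under us */
        bool has_private, dirty, mapped_to_disk;
    };
    struct mapping {
        void (*invalidatepage)(struct cache_page *p);  /* owner teardown hook */
    };

    static void drop_private(struct cache_page *p) { p->has_private = false; }

    static int truncate_complete_page(struct mapping *m, struct cache_page *p)
    {
        if (p->mapping != m)        /* raced with migration/reuse: bail out */
            return -1;
        if (p->has_private)         /* owner drops its private data first */
            m->invalidatepage(p);
        p->dirty = false;           /* cancel_dirty_page() analogue */
        p->mapped_to_disk = false;  /* ClearPageMappedToDisk() analogue */
        return 0;
    }

    int main(void)
    {
        struct mapping m = { .invalidatepage = drop_private };
        struct cache_page p = { .mapping = &m, .has_private = true, .dirty = true };
        printf("evicted=%d dirty=%d\n", truncate_complete_page(&m, &p), p.dirty);
        return 0;
    }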
/drivers/staging/lustre/lustre/lov/
  lovsub_page.c
    49: * Lovsub page operations.
    62: lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
    63:                  struct cl_page *page, struct page *unused)
    65: struct lovsub_page *lsb = cl_object_page_slice(obj, page);
    67: cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
/drivers/staging/lustre/lustre/ptlrpc/
  pers.c
    65: void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
    70: kiov->kiov_page = page;
/drivers/gpu/drm/armada/
  armada_gem.h
    20: struct page *page; /* for page backed */  (member of struct armada_gem_object)
/drivers/gpu/drm/radeon/
  drm_buffer.c
    29: * Multipart buffer for copying data which is larger than the page size.
    71: DRM_ERROR("Failed to allocate %dth page for drm"
   116: " (%p) %dth page.\n",
   159: int page = drm_buffer_page(buf);
   163: obj = &buf->data[page][idx];
   167: memcpy(stack_obj, &buf->data[page][idx], beginsz);
   169: memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
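drm_buffer.c's helpers read objects out of a buffer split into page-sized parts; when an object straddles a part boundary, the head is copied from the end of one part and the tail from the start of the next, as the two memcpy() calls at lines 167 and 169 show. A standalone sketch of that split copy, with an artificially small part size so the boundary case is easy to trigger (names here are illustrative, not the driver's API):

    #include <stdio.h>
    #include <string.h>

    #define PART_SIZE 8   /* stand-in for PAGE_SIZE, small so the split is visible */

    /* Copy `len` bytes starting at linear offset `off` out of a multipart
     * buffer, handling the case where the object crosses a part boundary. */
    static void buffer_read(char *parts[], size_t off, void *out, size_t len)
    {
        size_t part = off / PART_SIZE;
        size_t idx  = off % PART_SIZE;

        if (idx + len <= PART_SIZE) {            /* fits inside one part */
            memcpy(out, &parts[part][idx], len);
        } else {                                 /* straddles the boundary */
            size_t beginsz = PART_SIZE - idx;
            memcpy(out, &parts[part][idx], beginsz);
            memcpy((char *)out + beginsz, &parts[part + 1][0], len - beginsz);
        }
    }

    int main(void)
    {
        char a[PART_SIZE] = "ABCDEFGH", b[PART_SIZE] = "IJKLMNOP";
        char *parts[] = { a, b };
        char obj[5] = {0};
        buffer_read(parts, 6, obj, 4);   /* bytes 6..9 cross the part boundary */
        printf("%s\n", obj);             /* prints "GHIJ" */
        return 0;
    }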
/drivers/hwmon/pmbus/
  max16064.c
    31: static int max16064_read_word_data(struct i2c_client *client, int page, int reg)
    37: ret = pmbus_read_word_data(client, page,
    41: ret = pmbus_read_word_data(client, page,
    55: static int max16064_write_word_data(struct i2c_client *client, int page,
    62: ret = pmbus_write_word_data(client, page,
    66: ret = pmbus_write_word_data(client, page,
  pmbus.c
    31: * Find sensor groups and status registers on each page.
    36: int page;
    38: /* Sensors detected on page 0 only */
    75: for (page = 0; page < info->pages; page++) {
    76: if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) {
    77: info->func[page] |= PMBUS_HAVE_VOUT;
    78: if (pmbus_check_byte_register(client, page,
    80: info->func[page] |= ...
   109: int page;
    ...
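The pmbus.c identification path walks every page of a generic PMBus device, probes for optional registers, and ORs a capability bit into info->func[page] for each register that responds. A hedged sketch of that probe-and-flag loop; reg_responds() is a stub standing in for the pmbus_check_*_register() helpers:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PAGES 4
    #define HAVE_VOUT (1u << 0)
    #define HAVE_IOUT (1u << 1)

    /* Stub probe: pretend VOUT exists on every page, IOUT only on page 0. */
    static int reg_responds(int page, int reg) { return reg == 0 || page == 0; }

    int main(void)
    {
        uint32_t func[MAX_PAGES] = {0};

        for (int page = 0; page < MAX_PAGES; page++) {
            if (reg_responds(page, 0))          /* PMBUS_READ_VOUT analogue */
                func[page] |= HAVE_VOUT;
            if (reg_responds(page, 1))          /* PMBUS_READ_IOUT analogue */
                func[page] |= HAVE_IOUT;
        }
        for (int page = 0; page < MAX_PAGES; page++)
            printf("page %d: func=0x%x\n", page, func[page]);
        return 0;
    }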
  max8688.c
    43: static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
    47: if (page)
    73: static int max8688_write_word_data(struct i2c_client *client, int page, int reg,
    99: static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
   104: if (page > 0)
/drivers/infiniband/hw/ehca/
  ehca_pd.c
    89: struct ipz_small_queue_page *page, *tmp;
    93: list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
    95: free_page(page->page);
    96: kmem_cache_free(small_qp_cache, page);
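ehca_pd.c tears down its cache of small-queue pages with list_for_each_entry_safe(), whose extra cursor ('tmp') holds the successor so the current entry can be freed mid-walk. The same idiom in plain C over a singly linked list (the types and helpers are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    struct small_page {
        void *mem;
        struct small_page *next;
    };

    /* Free every node; 'tmp' holds the successor so freeing 'page' is safe. */
    static void free_all(struct small_page *head)
    {
        struct small_page *page, *tmp;
        for (page = head; page; page = tmp) {
            tmp = page->next;
            free(page->mem);   /* free_page(page->page) analogue */
            free(page);        /* kmem_cache_free() analogue */
        }
    }

    int main(void)
    {
        struct small_page *head = NULL;
        for (int i = 0; i < 3; i++) {
            struct small_page *p = malloc(sizeof(*p));
            if (!p)
                break;
            p->mem = malloc(64);
            p->next = head;
            head = p;
        }
        free_all(head);
        puts("freed");
        return 0;
    }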
/drivers/infiniband/hw/mlx4/
  doorbell.c
    47: struct mlx4_ib_user_db_page *page;
    52: list_for_each_entry(page, &context->db_page_list, list)
    53: if (page->user_virt == (virt & PAGE_MASK))
    56: page = kmalloc(sizeof *page, GFP_KERNEL);
    57: if (!page) {
    62: page->user_virt = (virt & PAGE_MASK);
    63: page->refcnt = 0;
    64: page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
    66: if (IS_ERR(page ...
/drivers/infiniband/hw/mlx5/
  doorbell.c
    49: struct mlx5_ib_user_db_page *page;
    54: list_for_each_entry(page, &context->db_page_list, list)
    55: if (page->user_virt == (virt & PAGE_MASK))
    58: page = kmalloc(sizeof(*page), GFP_KERNEL);
    59: if (!page) {
    64: page->user_virt = (virt & PAGE_MASK);
    65: page->refcnt = 0;
    66: page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
    68: if (IS_ERR(page ...
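The mlx4 and mlx5 doorbell files above share one idiom: scan a per-context list for an already-pinned user page keyed by the page-aligned virtual address, allocate and pin a new entry only on a miss, and bump a refcount either way. A userspace sketch of that get-or-create lookup; the pinning step (ib_umem_get() in the real code) is reduced to a comment and a 4 KiB page size is assumed:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_MASK (~(uintptr_t)4095)   /* assumes 4 KiB pages */

    struct db_page {
        uintptr_t user_virt;     /* page-aligned user address (the key) */
        int refcnt;
        struct db_page *next;
    };

    /* Find the pinned page covering `virt`, or create it on first use. */
    static struct db_page *db_page_get(struct db_page **list, uintptr_t virt)
    {
        struct db_page *page;

        for (page = *list; page; page = page->next)
            if (page->user_virt == (virt & PAGE_MASK))
                goto found;

        page = malloc(sizeof(*page));
        if (!page)
            return NULL;
        page->user_virt = virt & PAGE_MASK;
        page->refcnt = 0;
        /* the real code pins the user page here with ib_umem_get() */
        page->next = *list;
        *list = page;
    found:
        page->refcnt++;
        return page;
    }

    int main(void)
    {
        struct db_page *list = NULL;
        db_page_get(&list, 0x1234);                     /* allocates */
        struct db_page *p = db_page_get(&list, 0x1ff8); /* same page: reuses */
        printf("refcnt=%d\n", p ? p->refcnt : -1);      /* prints 2 */
        return 0;
    }

Keying on the page-aligned address lets several doorbells that land in the same user page share a single pinned entry, which is what the refcount is for.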
/drivers/media/dvb-frontends/
  rtl2830_priv.h
    36: u8 page; /* active register page */  (member of struct rtl2830_priv)
/drivers/gpu/drm/
  drm_cache.c
    42: drm_clflush_page(struct page *page)
    48: if (unlikely(page == NULL))
    51: page_virtual = kmap_atomic(page);
    57: static void drm_cache_flush_clflush(struct page *pages[],
    76: drm_clflush_pages(struct page *pages[], unsigned long num_pages)
    91: struct page *page = pages[i];
    94: if (unlikely(page == NULL))
    97: page_virtual = kmap_atomic(page);
    ...
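drm_clflush_page() maps each page and flushes it from the CPU caches one cache line at a time. A hedged, x86-only userspace sketch of that stride loop using the SSE2 _mm_clflush() intrinsic; the 64-byte line size is an assumption here, where the kernel queries the real value at boot:

    #include <stdint.h>
    #include <stdio.h>
    #include <immintrin.h>   /* _mm_clflush/_mm_mfence, x86 only */

    #define CACHELINE 64     /* assumed; real code reads the line size from CPUID */

    /* Flush every cache line backing [buf, buf + len) back to memory. */
    static void clflush_range(const void *buf, size_t len)
    {
        const char *p = (const char *)((uintptr_t)buf & ~(uintptr_t)(CACHELINE - 1));
        const char *end = (const char *)buf + len;

        for (; p < end; p += CACHELINE)
            _mm_clflush(p);
        _mm_mfence();        /* fence the flushes, as drm_clflush_* does with mb() */
    }

    int main(void)
    {
        static char page[4096];
        clflush_range(page, sizeof(page));
        puts("flushed");
        return 0;
    }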
  drm_scatter.c
    52: struct page *page;
    56: page = entry->pagelist[i];
    57: if (page)
    58: ClearPageReserved(page);
   129: /* This also forces the mapping of COW pages, so our page list
   152: /* Verify that each page points to its virtual address, and vice ...
/drivers/gpu/drm/i915/
  i915_gem_render_state.c
    78: struct page *page;
    86: page = sg_page(so->obj->pages->sgl);
    87: d = kmap(page);
   109: kunmap(page);
/drivers/infiniband/hw/qib/
  qib_user_pages.c
    39: static void __qib_release_user_pages(struct page **p, size_t num_pages,
    55: struct page **p)
    96: * have to bother with retries or mapping a dummy page to ensure we
   101: dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
   106: phys = pci_map_page(hwdev, page, offset, size, direction);
   110: phys = pci_map_page(hwdev, page, offset, size, direction);
   112: * FIXME: If we get 0 again, we should keep this page,
   113: * map another, then free the 0 page.
   122: * @start_page: the start page
    ...
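qib_map_page() (and its ipath twin further down) works around hardware that treats a bus address of 0 as invalid: if pci_map_page() hands back 0, the driver unmaps and maps again, keeping the second result, with a FIXME for the double-zero case. A sketch of that retry-on-sentinel pattern with a stubbed mapper standing in for pci_map_page():

    #include <stdint.h>
    #include <stdio.h>

    /* Stub "IOMMU": returns 0 the first time to simulate the unlucky case. */
    static uint64_t fake_map_page(void)
    {
        static int calls;
        return calls++ ? 0xabc000 : 0;
    }
    static void fake_unmap_page(uint64_t addr) { (void)addr; }

    /* Map a page for DMA, retrying once if we get the sentinel address 0,
     * which this hardware cannot use. */
    static uint64_t map_page_nonzero(void)
    {
        uint64_t phys = fake_map_page();

        if (phys == 0) {
            fake_unmap_page(phys);
            phys = fake_map_page();
            /* FIXME in the original: if 0 comes back again, keep this page,
             * map another, then free the 0 page. */
        }
        return phys;
    }

    int main(void)
    {
        printf("dma addr = 0x%llx\n", (unsigned long long)map_page_nonzero());
        return 0;
    }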
/drivers/md/
  dm-sysfs.c
    21: dm_attr_show(struct kobject *kobj, struct attribute *attr,
    22:              char *page)
    36: ret = dm_attr->show(md, page);
/drivers/auxdisplay/
  ks0108.c
   101: void ks0108_page(unsigned char page)
   103: ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7));
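ks0108_page() builds the KS0108 controller's "set page" command byte: the page index is clamped to the chip's eight pages (0-7) and the fixed opcode bits 3, 4, 5, and 7 are ORed in, i.e. 0xB8 | page. The same byte construction as a standalone sketch:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Build the KS0108 "set page" command: opcode bits 3,4,5,7 plus a
     * page index clamped to the 8 pages the controller has. */
    static unsigned char ks0108_page_cmd(unsigned char page)
    {
        if (page > 7)
            page = 7;                  /* min(page, 7) in the driver */
        return page | BIT(3) | BIT(4) | BIT(5) | BIT(7);   /* 0xB8 | page */
    }

    int main(void)
    {
        printf("0x%02x\n", ks0108_page_cmd(2));   /* prints 0xba */
        return 0;
    }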
/drivers/gpu/drm/nouveau/core/engine/dmaobj/
  nvd0.c
    89: u32 kind, page;
   100: nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
   101: args->v0.version, args->v0.page, args->v0.kind);
   103: page = args->v0.page;
   108: page = GF110_DMA_V0_PAGE_SP;
   111: page = GF110_DMA_V0_PAGE_LP;
   116: if (page > 1)
   118: priv->flags0 = (kind << 20) | (page << 6);
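nvd0.c validates the ioctl's page-size selector (only values 0 and 1 are accepted) and then packs the arguments into a single hardware word, kind at bit 20 and page at bit 6, per line 118. A small sketch of that validate-then-pack step; the field positions come from the snippet, the function name is mine:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack kind/page into the dmaobj flags word, rejecting bad page codes. */
    static int pack_flags0(uint32_t kind, uint32_t page, uint32_t *flags0)
    {
        if (page > 1)               /* only SP(0)/LP(1) page codes are valid */
            return -1;
        *flags0 = (kind << 20) | (page << 6);
        return 0;
    }

    int main(void)
    {
        uint32_t f;
        if (pack_flags0(0x02, 1, &f) == 0)
            printf("flags0 = 0x%08x\n", f);   /* prints 0x00200040 */
        return 0;
    }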
/drivers/gpu/drm/nouveau/core/subdev/vm/
  nv41.c
    46: u32 page = PAGE_SIZE / NV41_GART_PAGE;
    48: while (cnt && page--) {
/drivers/gpu/drm/ttm/
  ttm_agp_backend.c
    65: struct page *page = ttm->pages[i];
    67: if (!page)
    68: page = ttm->dummy_read_page;
    70: mem->pages[mem->page_count++] = page;
   116: struct page *dummy_read_page)
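ttm_agp_backend.c fills the AGP memory's page array and substitutes a shared dummy read page wherever the ttm has no real page attached, so the aperture never maps a hole. A sketch of that fill-with-fallback loop using plain arrays as stand-ins for page pointers:

    #include <stdio.h>

    #define NPAGES 4

    int main(void)
    {
        static char real0[16], real2[16], dummy[16];   /* stand-in "pages" */
        char *pages[NPAGES] = { real0, NULL, real2, NULL };
        char *bound[NPAGES];
        int count = 0;

        for (int i = 0; i < NPAGES; i++) {
            char *page = pages[i];
            if (!page)             /* hole: bind the shared dummy read page */
                page = dummy;
            bound[count++] = page; /* mem->pages[mem->page_count++] = page */
        }
        printf("bound=%d slot1_is_dummy=%d\n", count, bound[1] == dummy);
        return 0;
    }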
/drivers/infiniband/hw/ipath/
  ipath_user_pages.c
    41: static void __ipath_release_user_pages(struct page **p, size_t num_pages,
    57: struct page **p)
   101: * have to bother with retries or mapping a dummy page to ensure we
   106: dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
   111: phys = pci_map_page(hwdev, page, offset, size, direction);
   115: phys = pci_map_page(hwdev, page, offset, size, direction);
   117: * FIXME: If we get 0 again, we should keep this page,
   118: * map another, then free the 0 page.
   141: * FIXME: If we get 0 again, we should keep this page,
    ...
/drivers/staging/android/ion/
  ion_carveout_heap.c
    64: struct page *page = sg_page(table->sgl);
    65: ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
   113: struct page *page = sg_page(table->sgl);
   114: ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
   154: struct page *page;
   157: page = pfn_to_page(PFN_DOWN(heap_data->base));
   160: ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
    ...
  ion_chunk_heap.c
   145: struct page *page;
   148: page = pfn_to_page(PFN_DOWN(heap_data->base));
   151: ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
   153: ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
  ion_page_pool.c
    29: struct page *page = alloc_pages(pool->gfp_mask, pool->order);
    31: if (!page)
    33: ion_page_pool_alloc_set_cache_policy(pool, page);
    35: ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
    37: return page;
    40: ion_page_pool_free_pages(struct ion_page_pool *pool,
    41:                          struct page *page)
    43: ion_page_pool_free_set_cache_policy(pool, page);
    44: __free_pages(page, pool->order);
    47: ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
    63: struct page *page;
    81: struct page *page = NULL;
    98: ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
   109: ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
   139: struct page *page;
    ...
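ion_page_pool.c is a simple recycling pool: allocation prefers a page cached in the pool and falls back to alloc_pages() only when the pool is empty, while freeing pushes pages back onto the pool (ion_page_pool_free_immediate() bypasses it). A userspace sketch of that free-list pool, with the cache-policy and DMA-sync hooks omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct pool_page {
        struct pool_page *next;
        char data[4096];               /* stand-in for a real page */
    };

    struct page_pool { struct pool_page *free; int count; };

    /* Pop a recycled page if we have one, otherwise go to the allocator. */
    static struct pool_page *pool_alloc(struct page_pool *pool)
    {
        struct pool_page *page = pool->free;
        if (page) {
            pool->free = page->next;
            pool->count--;
            return page;
        }
        return malloc(sizeof(struct pool_page));  /* alloc_pages() analogue */
    }

    /* Recycle instead of freeing; ion_page_pool_free_immediate() would
     * call free() here unconditionally. */
    static void pool_free(struct page_pool *pool, struct pool_page *page)
    {
        page->next = pool->free;
        pool->free = page;
        pool->count++;
    }

    int main(void)
    {
        struct page_pool pool = { NULL, 0 };
        struct pool_page *a = pool_alloc(&pool);   /* fresh allocation */
        pool_free(&pool, a);                       /* goes back to the pool */
        struct pool_page *b = pool_alloc(&pool);   /* same page comes back */
        printf("recycled: %d\n", a == b);          /* prints 1 */
        free(b);
        return 0;
    }

The pool trades memory footprint for allocation latency; the full file also wires the pool into a shrinker so cached pages can be reclaimed under memory pressure.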