Searched refs:page (Results 51 - 75 of 783) sorted by relevance

/drivers/char/agp/
backend.c
145 struct page *page = bridge->driver->agp_alloc_page(bridge); local
147 if (!page) {
149 "can't get memory for scratch page\n");
153 bridge->scratch_page_page = page;
154 bridge->scratch_page_dma = page_to_phys(page);
197 struct page *page = bridge->scratch_page_page; local
199 bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
200 bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FRE
224 struct page *page = bridge->scratch_page_page; local
[all...]
generic.c
57 * They use the basic page allocation routines to do the brunt of the work.
84 * Use kmalloc if possible for the page list. Otherwise fall back to
115 unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
117 if (INT_MAX/sizeof(struct page *) < num_agp_pages)
280 struct page *page = bridge->driver->agp_alloc_page(bridge); local
282 if (page == NULL) {
286 new->pages[i] = page;
873 struct page *page; local
992 struct page *page; local
1208 struct page * page; local
1238 struct page * page; local
1255 struct page *page; local
1278 agp_generic_destroy_page(struct page *page, int flags) argument
[all...]
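The generic.c comment above describes allocating the page-pointer list with kmalloc where possible, falling back to a larger allocator, and rejecting counts whose byte size would overflow before the multiplication is ever done. A minimal standalone userspace sketch of that guard follows; alloc_page_list and the single calloc() stand-in are illustrative, not the driver's code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate an array of num_pages pointers, checking that the byte count
 * cannot overflow before it is computed, the same guard generic.c
 * applies with INT_MAX before building its struct page * list. */
static void **alloc_page_list(size_t num_pages)
{
	if (num_pages > SIZE_MAX / sizeof(void *))
		return NULL;	/* num_pages * sizeof(void *) would overflow */

	/* generic.c prefers kmalloc and falls back to vmalloc for large
	 * lists; a single calloc() stands in for both paths here. */
	return calloc(num_pages, sizeof(void *));
}

int main(void)
{
	void **pages = alloc_page_list(1024);

	printf("page list %s\n", pages ? "allocated" : "allocation failed");
	free(pages);
	return 0;
}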
/drivers/net/ethernet/sfc/
rx.c
33 /* Number of RX buffers to recycle pages for. When creating the RX page recycle
34 * ring, this number is divided by the number of buffers per page to calculate
35 * the number of pages to store in the RX page recycle ring.
62 return page_address(buf->page) + buf->page_offset;
108 /* Check the RX page recycle ring for a page that can be reused. */
109 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
112 struct page *page; local
117 page
156 struct page *page; local
216 struct page *page = rx_buf->page; local
242 struct page *page = rx_buf->page; local
798 struct page *page = rx_queue->page_ring[i]; local
[all...]
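The rx.c comment above sizes the page recycle ring by dividing the recycle-buffer target by the number of RX buffers that fit in one page. A small standalone sketch of that arithmetic; PAGE_BYTES, RX_BUF_SIZE and RECYCLE_BUFFERS are illustrative values, not the driver's.

#include <stdio.h>

#define PAGE_BYTES       4096u
#define RX_BUF_SIZE      2048u	/* illustrative per-buffer size */
#define RECYCLE_BUFFERS  4096u	/* illustrative recycle target */

int main(void)
{
	unsigned int bufs_per_page = PAGE_BYTES / RX_BUF_SIZE;
	unsigned int ring_pages = RECYCLE_BUFFERS / bufs_per_page;

	printf("%u buffers per page -> recycle ring of %u pages\n",
	       bufs_per_page, ring_pages);
	return 0;
}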
/drivers/s390/char/
sclp_con.c
50 void *page; local
53 page = sclp_unmake_buffer(buffer);
58 list_add_tail((struct list_head *) page, &sclp_con_pages);
140 void *page; local
152 page = sclp_unmake_buffer(buffer);
153 list_add_tail((struct list_head *) page, &sclp_con_pages);
165 void *page; local
189 page = sclp_con_pages.next;
190 list_del((struct list_head *) page);
191 sclp_conbuf = sclp_make_buffer(page, sclp_con_column
318 void *page; local
[all...]
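The sclp_con.c lines above keep free output pages on a list by casting the page itself to a list node, so the first bytes of an idle page double as its own bookkeeping. A standalone userspace sketch of the same trick; the tiny list_head implementation below is a local stand-in, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_BYTES 4096

/* Minimal local stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct list_head free_pages;
	void *page;
	int i;

	list_init(&free_pages);

	/* A free page carries its own list node in its first bytes, like
	 * sclp_con threading released pages back onto sclp_con_pages. */
	for (i = 0; i < 4; i++)
		list_add_tail((struct list_head *)malloc(PAGE_BYTES), &free_pages);

	/* Take one page off the free list for reuse. */
	page = free_pages.next;
	list_del((struct list_head *)page);
	printf("reusing page at %p\n", page);

	/* Drain and release the rest. */
	while (free_pages.next != &free_pages) {
		void *p = free_pages.next;

		list_del((struct list_head *)p);
		free(p);
	}
	free(page);
	return 0;
}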
/drivers/gpu/drm/nouveau/core/engine/dmaobj/
nvd0.c
89 u32 kind, page; local
100 nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
101 args->v0.version, args->v0.page, args->v0.kind);
103 page = args->v0.page;
108 page = GF110_DMA_V0_PAGE_SP;
111 page = GF110_DMA_V0_PAGE_LP;
116 if (page > 1)
118 priv->flags0 = (kind << 20) | (page << 6);
/drivers/gpu/drm/exynos/
exynos_drm_gem.h
32 * @sgt: sg table to transfer page data.
43 struct page **pages;
61 * in page unit.
76 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
142 /* page fault handler and mmap fault address(virtual) to physical memory. */
162 struct page **pages,
166 void exynos_gem_put_pages_to_userptr(struct page **pages,
/drivers/gpu/drm/nouveau/
nouveau_ttm.h
16 struct page *dummy_read_page);
/drivers/gpu/drm/radeon/
drm_buffer.c
29 * Multipart buffer for copying data which is larger than the page size.
71 DRM_ERROR("Failed to allocate %dth page for drm"
116 " (%p) %dth page.\n",
159 int page = drm_buffer_page(buf); local
163 obj = &buf->data[page][idx];
167 memcpy(stack_obj, &buf->data[page][idx], beginsz);
169 memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
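The drm_buffer.c snippet above reads an object that straddles a page boundary of the multipart buffer: the tail of the current page is copied first, and the rest comes from the start of the next page. A hedged standalone sketch with toy sizes; PAGE_SZ and read_object are illustrative, not the DRM helper's API.

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 8	/* tiny "page" so the straddle is easy to see */

/* Copy len bytes starting at index idx of page 'page', continuing into
 * page + 1 when the object crosses the page boundary. */
static void read_object(char data[][PAGE_SZ], int page, size_t idx,
			void *stack_obj, size_t len)
{
	size_t beginsz = PAGE_SZ - idx;

	if (len <= beginsz) {
		memcpy(stack_obj, &data[page][idx], len);
		return;
	}
	memcpy(stack_obj, &data[page][idx], beginsz);
	memcpy((char *)stack_obj + beginsz, &data[page + 1][0], len - beginsz);
}

int main(void)
{
	char data[2][PAGE_SZ];
	char obj[6] = { 0 };

	memcpy(data[0], "abcdefgh", PAGE_SZ);
	memcpy(data[1], "ijklmnop", PAGE_SZ);

	/* 5 bytes starting at offset 5 of page 0: "fgh" from page 0,
	 * "ij" from page 1. */
	read_object(data, 0, 5, obj, 5);
	printf("%s\n", obj);
	return 0;
}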
/drivers/xen/
biomerge.c
4 #include <xen/page.h>
privcmd.c
36 #include <xen/page.h>
69 struct page *p, *n;
98 struct page *page = alloc_page(GFP_KERNEL); local
101 if (page == NULL)
104 pagedata = page_address(page);
106 list_add_tail(&page->lru, pagelist);
144 struct page *page; local
146 page
222 struct page *page = list_first_entry(&pagelist, local
[all...]
/drivers/hwmon/pmbus/
max34440.c
53 static int max34440_read_word_data(struct i2c_client *client, int page, int reg) argument
61 ret = pmbus_read_word_data(client, page,
65 ret = pmbus_read_word_data(client, page,
71 ret = pmbus_read_word_data(client, page,
75 ret = pmbus_read_word_data(client, page,
81 ret = pmbus_read_word_data(client, page,
87 ret = pmbus_read_word_data(client, page,
94 ret = pmbus_read_word_data(client, page,
98 ret = pmbus_read_word_data(client, page,
118 static int max34440_write_word_data(struct i2c_client *client, int page, argument
165 max34440_read_byte_data(struct i2c_client *client, int page, int reg) argument
[all...]
pmbus.h
344 u32 func[PMBUS_PAGES]; /* Functionality, per page */
356 int (*read_byte_data)(struct i2c_client *client, int page, int reg);
357 int (*read_word_data)(struct i2c_client *client, int page, int reg);
358 int (*write_word_data)(struct i2c_client *client, int page, int reg,
360 int (*write_byte)(struct i2c_client *client, int page, u8 value);
373 int pmbus_set_page(struct i2c_client *client, u8 page);
374 int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
375 int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
376 int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
377 int pmbus_write_byte(struct i2c_client *client, int page, u
[all...]
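The pmbus.h declarations above show the PMBus pattern of addressing every register access to a specific page, with chip drivers able to override the accessors through per-driver function pointers. A standalone sketch of that dispatch shape; the fake register file, the NOT_HANDLED convention and quirky_read_word_data are purely illustrative, not the subsystem's API.

#include <stdio.h>

#define PAGES		2
#define REGS		4
#define NOT_HANDLED	(-1)	/* tell the core to use the default accessor */

/* Fake register file, one bank of word registers per page. */
static int chip[PAGES][REGS] = {
	{ 100, 200, 300, 400 },
	{ 110, 210, 310, 410 },
};

/* Mirrors the idea of the read_word_data hook in the driver info struct. */
struct pmbus_like_ops {
	int (*read_word_data)(int page, int reg);
};

static int default_read_word_data(int page, int reg)
{
	/* A real driver would issue a PAGE select command first. */
	return chip[page][reg];
}

/* Chip quirk: register 2 is reported scaled down by 10 on every page. */
static int quirky_read_word_data(int page, int reg)
{
	if (reg != 2)
		return NOT_HANDLED;
	return chip[page][reg] / 10;
}

static int read_word(const struct pmbus_like_ops *ops, int page, int reg)
{
	int ret = NOT_HANDLED;

	if (ops && ops->read_word_data)
		ret = ops->read_word_data(page, reg);
	if (ret == NOT_HANDLED)
		ret = default_read_word_data(page, reg);
	return ret;
}

int main(void)
{
	struct pmbus_like_ops ops = { .read_word_data = quirky_read_word_data };

	printf("page 1, reg 2 = %d\n", read_word(&ops, 1, 2));	/* 31 */
	printf("page 0, reg 1 = %d\n", read_word(&ops, 0, 1));	/* 200 */
	return 0;
}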
/drivers/staging/android/ion/
ion_heap.c
37 struct page **pages = vmalloc(sizeof(struct page *) * npages);
38 struct page **tmp = pages;
50 struct page *page = sg_page(sg); local
54 *(tmp++) = page++;
82 struct page *page = sg_page(sg); local
90 page += offset / PAGE_SIZE;
95 ret = remap_pfn_range(vma, addr, page_to_pfn(page), le
154 ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) argument
[all...]
/drivers/target/tcm_fc/
tfc_io.c
76 struct page *page = NULL; local
103 page = sg_page(sg);
121 page = sg_page(sg);
150 BUG_ON(!page);
151 get_page(page);
154 page, off_in_page, tlen);
158 PAGE_SIZE << compound_order(page);
160 BUG_ON(!page);
161 from = kmap_atomic(page
228 struct page *page = NULL; local
[all...]
/drivers/gpu/drm/i915/
i915_gem_render_state.c
78 struct page *page; local
86 page = sg_page(so->obj->pages->sgl);
87 d = kmap(page);
109 kunmap(page);
/drivers/staging/lustre/lustre/llite/
dir.c
64 * pages on the client. These pages were indexed in client page cache by
95 * special processing is needed for "page hash chains" (i.e., sequences of
99 * client the hash of the first entry on the page next to one returned. When
101 * returned page, page hash collision has to be handled. Pages in the
105 * pages. Instead, when page hash collision is detected, all overflow pages
109 * invocation finishes, overflow pages are discarded. If page hash collision
111 * page hash collision, again read overflow pages in, process next portion of
113 * because, given reasonable hash, page hash collisions are extremely rare.
130 * page forma
153 struct page *page; local
243 ll_check_page(struct inode *dir, struct page *page) argument
249 ll_release_page(struct page *page, int remove) argument
275 struct page *page; local
342 struct page *page; local
485 struct page *page; local
[all...]
/drivers/video/adf/
adf_memblock.c
29 struct page *page = pfn_to_page(pfn); local
41 sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
80 struct page *page = pfn_to_page(pfn); local
83 return kmap_atomic(page);
85 return kmap(page);
137 * @base and @size must be page-aligned.
/drivers/crypto/qat/qat_common/
qat_uclo.c
67 struct icp_qat_uclo_page *page = NULL; local
85 ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
86 if (!ae_slice->page)
88 page = ae_slice->page;
89 page->encap_page = encap_image->page;
90 ae_slice->page->region = ae_slice->region;
111 kfree(ae_data->ae_slices[i].page);
404 struct icp_qat_uclo_encap_page *page; local
591 qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uof_image *img, struct icp_qat_uclo_encap_page *page) argument
1128 struct icp_qat_uclo_page *page; local
[all...]
/drivers/md/
bitmap.c
39 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
41 * 1) check to see if this page is allocated, if it's not then try to alloc
42 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
43 * page pointer directly as a counter
45 * if we find our page, we increment the page's refcount so that it stays
49 unsigned long page, int create)
55 if (page >= bitmap->pages) {
57 * End-of-device while looking for a whole page.
63 if (bitmap->bp[page]
102 bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page) argument
132 read_sb_page(struct mddev *mddev, loff_t offset, struct page *page, unsigned long index, int size) argument
189 write_sb_page(struct bitmap *bitmap, struct page *page, int wait) argument
263 write_page(struct bitmap *bitmap, struct page *page, int wait) argument
304 __clear_page_buffers(struct page *page) argument
310 free_buffers(struct page *page) argument
334 read_page(struct file *file, unsigned long index, struct bitmap *bitmap, unsigned long count, struct page *page) argument
831 struct page *page; local
855 struct page *page; local
926 struct page *page = NULL; local
1069 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; local
1077 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; local
1226 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; variable
1945 location_show(struct mddev *mddev, char *page) argument
2041 space_show(struct mddev *mddev, char *page) argument
2074 timeout_show(struct mddev *mddev, char *page) argument
2124 backlog_show(struct mddev *mddev, char *page) argument
2146 chunksize_show(struct mddev *mddev, char *page) argument
2172 metadata_show(struct mddev *mddev, char *page) argument
2196 can_clear_show(struct mddev *mddev, char *page) argument
2226 behind_writes_used_show(struct mddev *mddev, char *page) argument
[all...]
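The bitmap.c comment above describes a fallback where, if a counter page cannot be allocated, the pointer slot is marked hijacked and its own bits are pressed into service as counter storage. A hedged standalone sketch of that idea; the union layout and field names below are illustrative, not md's actual encoding.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* One slot per bitmap page: either a pointer to a real counter page or,
 * when that allocation failed, the pointer word itself holds counters. */
struct bp_slot {
	union {
		void *map;
		uint16_t hijacked_ctr[sizeof(void *) / sizeof(uint16_t)];
	};
	unsigned int hijacked:1;
};

static void ensure_page(struct bp_slot *bp, int simulate_oom)
{
	if (bp->hijacked || bp->map)
		return;				/* already usable */

	bp->map = simulate_oom ? NULL : calloc(1, 4096);
	if (!bp->map)
		bp->hijacked = 1;		/* fall back to the slot itself */
}

/* Hand back a counter: from the allocated page, or from the hijacked
 * pointer word when the page could not be allocated. */
static uint16_t *counter(struct bp_slot *bp, unsigned int idx)
{
	if (bp->hijacked)
		return &bp->hijacked_ctr[idx % 2];
	return &((uint16_t *)bp->map)[idx];
}

int main(void)
{
	struct bp_slot ok = { .map = NULL }, oom = { .map = NULL };

	ensure_page(&ok, 0);
	ensure_page(&oom, 1);		/* pretend the allocation failed */

	(*counter(&ok, 10))++;
	(*counter(&oom, 0))++;

	printf("normal slot counter = %d, hijacked slot counter = %d\n",
	       *counter(&ok, 10), *counter(&oom, 0));

	free(ok.map);
	return 0;
}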
/drivers/block/drbd/
drbd_bitmap.c
62 * directly, but either by bitnumber, or by page index and offset.
66 * we need 32 MiB to store the array of page pointers.
96 struct page **bm_pages;
173 /* we store some "meta" info about our pages in page->private */
178 * Use 24 bits as page index, covers 2 peta byte storage
180 * Used to report the failed page idx on io error from the endio handlers.
183 /* this page is currently read in, or written back */
185 /* if there has been an IO error for this page */
198 * allocating the page. All other bm_set_page_* and bm_clear_page_* need to
202 static void bm_store_page_idx(struct page *pag argument
208 bm_page_to_idx(struct page *page) argument
233 bm_set_page_unchanged(struct page *page) argument
240 bm_set_page_need_writeout(struct page *page) argument
256 struct page *page; local
266 bm_test_page_unchanged(struct page *page) argument
272 bm_set_page_io_err(struct page *page) argument
277 bm_clear_page_io_err(struct page *page) argument
282 bm_set_page_lazy_writeout(struct page *page) argument
287 bm_test_page_lazy_writeout(struct page *page) argument
311 struct page *page = b->bm_pages[idx]; local
381 struct page **new_pages, *page; local
998 struct page *page; local
[all...]
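The drbd_bitmap.c comments above pack a 24-bit page index plus a few status bits (IO in flight, IO error, lazy writeout) into the per-page private word. A standalone sketch of that bit packing; the bit positions and mask names below are illustrative, not DRBD's.

#include <stdio.h>

/* Illustrative layout: low 24 bits carry the page index, the bits above
 * it carry per-page status flags. */
#define BM_PAGE_IDX_MASK	((1ul << 24) - 1)
#define BM_PAGE_IO_LOCK		(1ul << 24)	/* being read in / written back */
#define BM_PAGE_IO_ERROR	(1ul << 25)	/* an IO error hit this page */
#define BM_PAGE_NEED_WRITEOUT	(1ul << 26)

static unsigned long bm_store_page_idx(unsigned long priv, unsigned long idx)
{
	return (priv & ~BM_PAGE_IDX_MASK) | (idx & BM_PAGE_IDX_MASK);
}

static unsigned long bm_page_to_idx(unsigned long priv)
{
	return priv & BM_PAGE_IDX_MASK;
}

int main(void)
{
	unsigned long priv = 0;

	priv = bm_store_page_idx(priv, 123456);
	priv |= BM_PAGE_NEED_WRITEOUT;	/* page changed, schedule writeout */
	priv |= BM_PAGE_IO_LOCK;	/* write submitted */
	priv |= BM_PAGE_IO_ERROR;	/* endio handler reported failure */

	printf("idx=%lu io_lock=%d io_error=%d need_writeout=%d\n",
	       bm_page_to_idx(priv),
	       !!(priv & BM_PAGE_IO_LOCK),
	       !!(priv & BM_PAGE_IO_ERROR),
	       !!(priv & BM_PAGE_NEED_WRITEOUT));
	return 0;
}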
/drivers/gpu/drm/
drm_vm.c
92 * \return pointer to the page structure.
94 * Find the right map and if it's AGP memory find the real physical page to
95 * map, get the page, increment the use count and return it.
130 struct page *page; local
140 * It's AGP memory - find the real physical page to map
152 * Get the page, inc the use count, and return it
155 page = agpmem->memory->pages[offset];
156 get_page(page);
157 vmf->page
192 struct page *page; local
296 struct page *page; local
332 struct page *page; local
[all...]
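The drm_vm.c comment above summarizes the AGP fault path: translate the faulting address into an index into the backing page array, take a reference on that page, and hand it back to the fault handler. A hedged userspace sketch of that lookup-and-refcount shape; struct fake_page and the layout are illustrative, not the DRM code.

#include <stdio.h>

#define PAGE_SHIFT	12
#define NPAGES		16

/* Stand-in for struct page with a use count. */
struct fake_page {
	int refcount;
};

static struct fake_page backing[NPAGES];

/* Sketch of the fault path: turn the faulting address into an index into
 * the backing page array, take a reference, return the page. */
static struct fake_page *fault(unsigned long vm_start, unsigned long addr)
{
	unsigned long offset = (addr - vm_start) >> PAGE_SHIFT;

	if (offset >= NPAGES)
		return NULL;			/* would be VM_FAULT_SIGBUS */

	backing[offset].refcount++;		/* the get_page() step */
	return &backing[offset];
}

int main(void)
{
	unsigned long vm_start = 0x10000000ul;
	struct fake_page *pg = fault(vm_start, vm_start + 5 * 4096 + 123);

	if (pg)
		printf("faulted in page %ld, refcount now %d\n",
		       (long)(pg - backing), pg->refcount);
	return 0;
}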
/drivers/s390/block/
dasd_fba.c
478 char *page; local
482 page = (char *) get_zeroed_page(GFP_ATOMIC);
483 if (page == NULL) {
488 len = sprintf(page, PRINTK_HEADER
491 len += sprintf(page + len, PRINTK_HEADER
494 len += sprintf(page + len, PRINTK_HEADER
500 len += sprintf(page + len, PRINTK_HEADER
505 len += sprintf(page + len, " %02x",
508 len += sprintf(page + len, "\n");
511 len += sprintf(page
[all...]
/drivers/char/
bfin-otp.c
37 * All reads must be in half page chunks (half page == 64 bits).
42 u32 page, flags, ret; local
54 page = *pos / (sizeof(u64) * 2);
57 stamp("processing page %i (0x%x:%s)", page, flags,
59 ret = bfrom_OtpRead(page, flags, &content);
70 ++page;
118 * All writes must be in half page chunks (half page
123 u32 timing, page, base_flags, flags, ret; local
[all...]
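The bfin-otp.c lines above convert a byte offset into an OTP page number plus a lower/upper-half selector, because the hardware reads one 64-bit half of a 128-bit page at a time. A standalone sketch of that address arithmetic; the macro names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* One OTP page is 128 bits; the hardware reads one 64-bit half at a time. */
#define OTP_HALF_BYTES	sizeof(uint64_t)
#define OTP_PAGE_BYTES	(OTP_HALF_BYTES * 2)

int main(void)
{
	long pos;

	for (pos = 0; pos < 4 * (long)OTP_PAGE_BYTES; pos += OTP_HALF_BYTES) {
		unsigned int page = pos / OTP_PAGE_BYTES;
		int upper_half = (pos / OTP_HALF_BYTES) & 1;

		printf("offset %3ld -> page %u, %s half\n",
		       pos, page, upper_half ? "upper" : "lower");
	}
	return 0;
}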
/drivers/staging/lustre/lustre/osc/
osc_page.c
66 struct cl_page *page;
81 page = opg->ops_cl.cpl_page;
90 osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
96 * Checks an invariant that a page in the cache is covered by a lock, as
105 struct cl_page *page;
111 page = opg->ops_cl.cpl_page;
112 if (page->cp_owner != NULL &&
113 cl_io_top(page
175 struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page); local
186 struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page); local
506 osc_page_init(const struct lu_env *env, struct cl_object *obj, struct cl_page *page, struct page *vmpage) argument
631 struct cl_page *page = pvec[i]; local
681 struct cl_page *page; local
[all...]
/drivers/staging/rtl8188eu/hal/
fw.c
101 u32 page, const u8 *buffer, u32 size)
104 u8 u8page = (u8) (page & 0x07);
116 u32 page, offset; local
123 for (page = 0; page < page_no; page++) {
124 offset = page * FW_8192C_PAGE_SIZE;
125 _rtl88e_fw_page_write(adapt, page, (buf_ptr + offset),
131 page = page_no;
132 _rtl88e_fw_page_write(adapt, page, (buf_pt
100 _rtl88e_fw_page_write(struct adapter *adapt, u32 page, const u8 *buffer, u32 size) argument
[all...]
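The fw.c loop above splits the firmware image into fixed-size pages, writes each full page at its page offset, then sends whatever remainder is left as a final partial page. A standalone sketch of the same split; fw_page_write just prints, and FW_PAGE_SIZE stands in for FW_8192C_PAGE_SIZE.

#include <stdio.h>

#define FW_PAGE_SIZE 4096u	/* stands in for FW_8192C_PAGE_SIZE */

/* Illustrative stand-in for the per-page write: a real driver would
 * select the chip's page register and copy the chunk over the bus. */
static void fw_page_write(unsigned int page, const unsigned char *buf,
			  unsigned int size)
{
	(void)buf;	/* the sketch only reports what would be written */
	printf("write page %u: %u bytes\n", page, size);
}

static void fw_download(const unsigned char *fw, unsigned int fw_size)
{
	unsigned int page_no = fw_size / FW_PAGE_SIZE;
	unsigned int remain = fw_size % FW_PAGE_SIZE;
	unsigned int page, offset;

	for (page = 0; page < page_no; page++) {
		offset = page * FW_PAGE_SIZE;
		fw_page_write(page, fw + offset, FW_PAGE_SIZE);
	}
	if (remain) {
		offset = page_no * FW_PAGE_SIZE;
		fw_page_write(page_no, fw + offset, remain);
	}
}

int main(void)
{
	static unsigned char fw[10000];		/* pretend firmware image */

	fw_download(fw, sizeof(fw));
	return 0;
}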

Completed in 563 milliseconds
