Searched refs:pages (Results 1 - 25 of 28) sorted by relevance

/net/ceph/
pagevec.c
13 * build a vector of user pages
18 struct page **pages; local
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
23 if (!pages)
30 num_pages - got, write_page, 0, pages + got, NULL);
39 return pages;
42 ceph_put_page_vector(pages, got, false);
47 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) argument
53 set_page_dirty_lock(pages[i]);
63 ceph_release_page_vector(struct page **pages, int num_pages) argument
78 struct page **pages; local
98 ceph_copy_user_to_page_vector(struct page **pages, const void __user *data, loff_t off, size_t len) argument
124 ceph_copy_to_page_vector(struct page **pages, const void *data, loff_t off, size_t len) argument
147 ceph_copy_from_page_vector(struct page **pages, void *data, loff_t off, size_t len) argument
174 ceph_zero_page_vector_range(int off, int len, struct page **pages) argument
[all...]
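
A minimal sketch of the page-vector lifecycle these helpers form, using the names matched above plus calc_pages_for() from <linux/ceph/libceph.h>; the wrapper function itself is hypothetical:

	#include <linux/err.h>
	#include <linux/ceph/libceph.h>

	/* Hypothetical helper: allocate a vector covering 'len' bytes,
	 * zero it, and release it again. */
	static int demo_page_vector(size_t len)
	{
		int num_pages = calc_pages_for(0, len);
		struct page **pages;

		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages))
			return PTR_ERR(pages);	/* ERR_PTR(-ENOMEM) on failure */

		/* zero the byte range [0, len) across the vector */
		ceph_zero_page_vector_range(0, len, pages);

		ceph_release_page_vector(pages, num_pages);
		return 0;
	}
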
osd_client.c
95 struct page **pages, u64 length, u32 alignment,
99 osd_data->pages = pages;
154 unsigned int which, struct page **pages,
161 ceph_osd_data_pages_init(osd_data, pages, length, alignment,
167 unsigned int which, struct page **pages,
174 ceph_osd_data_pages_init(osd_data, pages, length, alignment,
223 unsigned int which, struct page **pages, u64 length,
229 ceph_osd_data_pages_init(osd_data, pages, length, alignment,
235 unsigned int which, struct page **pages, u64 length,
94 ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) argument
153 osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) argument
166 osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) argument
222 osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) argument
234 osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) argument
2618 ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct ceph_file_layout *layout, u64 off, u64 *plen, u32 truncate_seq, u64 truncate_size, struct page **pages, int num_pages, int page_align) argument
2659 ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct ceph_file_layout *layout, struct ceph_snap_context *snapc, u64 off, u64 len, u32 truncate_seq, u64 truncate_size, struct timespec *mtime, struct page **pages, int num_pages) argument
[all...]
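
The signatures above all share the same tail; a hedged sketch of attaching a page vector to op 0 of an existing request (osd_req, pages, and len are assumed to come from the caller):

	/* pages become the data buffer for the extent op; alignment 0,
	 * neither pool-owned nor request-owned */
	osd_req_op_extent_osd_data_pages(osd_req, 0, pages, len,
					 0 /* alignment */,
					 false /* pages_from_pool */,
					 false /* own_pages */);
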
messenger.c
587 /* sendpage cannot properly handle pages with page_count == 0,
901 BUG_ON(!data->pages);
931 return data->pages[cursor->page_index];
1262 /* fill in crc (except data pages), footer */
1517 * need to map the page. If we have no pages, they have
2478 /* msg pages? */
3091 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, argument
3096 BUG_ON(!pages);
3101 data->pages = pages;
[all...]
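
A one-line sketch of handing such a vector to an outgoing message (msg, pages, and len are assumptions; the BUG_ON above means pages must be non-NULL):

	ceph_msg_data_add_pages(msg, pages, len, 0 /* alignment */);
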
/net/9p/
trans_common.c
23 * p9_release_req_pages - Release pages after the transaction.
25 void p9_release_pages(struct page **pages, int nr_pages) argument
30 if (pages[i])
31 put_page(pages[i]);
36 * p9_nr_pages - Return number of pages needed to accommodate the payload.
48 * payload_gup - Translates user buffer into kernel pages and
52 * @pdata_len: Total length of the IO. gup may not return requested # of pages.
53 * @nr_pages: number of pages to accommodate the payload
54 * @rw: Indicates if the pages are for read or write.
57 int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write) argument
[all...]
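
A hedged sketch of the pin/use/release pattern these helpers imply; udata, len, and write are assumptions, and the wrapper is hypothetical:

	#include <linux/slab.h>

	static int demo_pin_payload(char *udata, int len, int write)
	{
		int nr_pages = p9_nr_pages(udata, len);
		struct page **pages;
		int err;

		pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
		if (!pages)
			return -ENOMEM;

		/* pin the user buffer; gup may update nr_pages downward */
		err = p9_payload_gup(udata, &nr_pages, pages, write);
		if (err >= 0) {
			/* ... pack pages into a scatter/gather list ... */
			p9_release_pages(pages, nr_pages);
		}
		kfree(pages);
		return err;
	}
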
trans_virtio.c
215 * this takes a list of pages.
218 * @pdata: a list of pages to add into sg.
219 * @nr_pages: number of pages to pack into the scatter/gather list
317 struct page **pages, char *data,
332 err = p9_payload_gup(data, &nr_pages, pages, write);
337 /* kernel buffer, no need to pin pages */
343 pages[index++] = vmalloc_to_page(data);
345 pages[index++] = kmap_to_page(data);
490 /* wakeup anybody waiting for slots to pin pages */
316 p9_get_mapped_pages(struct virtio_chan *chan, struct page **pages, char *data, int nr_pages, int write, int kern_buf) argument
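
The kernel-buffer branch above resolves each address to its backing page without pinning; schematically, assuming data is a kernel pointer:

	struct page *pg;

	if (is_vmalloc_addr(data))
		pg = vmalloc_to_page(data);	/* vmalloc area */
	else
		pg = kmap_to_page(data);	/* linear/kmap mapping */
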
/net/rds/
H A Dinfo.c48 * buffer is big enough. The destination pages that make up the buffer
65 struct page **pages; member in struct:rds_info_iterator
113 * get_user_pages() called flush_dcache_page() on the pages for us.
122 iter->addr = kmap_atomic(*iter->pages);
127 "bytes %lu\n", *iter->pages, iter->addr,
140 iter->pages++;
167 struct page **pages = NULL; local
191 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
192 if (!pages) {
196 ret = get_user_pages_fast(start, nr_pages, 1, pages);
[all...]
rdma.c
49 * get the number of pages by looking at the page indices that the start and
155 * Helper function to pin user pages.
158 struct page **pages, int write)
162 ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
166 put_page(pages[ret]);
178 struct page **pages = NULL; local
207 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
208 if (!pages) {
232 * Pin the pages that make up the user buffer and transfer the page
241 ret = rds_pin_pages(args->vec.addr, nr_pages, pages,
157 rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, struct page **pages, int write) argument
553 struct page **pages = NULL; local
[all...]
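
The rds_pin_pages() matches show the standard cleanup rule for a partial get_user_pages_fast(): drop the references already taken, then fail. Paraphrased:

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
	if (ret >= 0 && ret < nr_pages) {
		/* partial pin: release what we did get */
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}
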
/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
55 * Replace the pages in the rq_argpages array with the pages from the SGE in
56 * the RDMA_RECV completion. The SGL should contain full pages up until the
68 page = ctxt->pages[0];
85 rqstp->rq_arg.pages = &rqstp->rq_pages[1];
88 page = ctxt->pages[sge_no];
106 /* If not all pages were used from the SGL, free the remaining ones */
109 page = ctxt->pages[sge_no++];
165 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
[all...]
svc_rdma_sendto.c
74 /* pages SGE */
80 page_address(xdr->pages[page_no]) + page_off;
121 page = xdr->pages[xdr_off >> PAGE_SHIFT];
378 int pages; local
393 ctxt->pages[0] = page;
427 * respages array. They are our pages until the I/O
430 pages = rqstp->rq_next_page - rqstp->rq_respages;
431 for (page_no = 0; page_no < pages; page_no++) {
432 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
436 * If there are more pages tha
[all...]
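
The xdr_off lookup above is the usual byte-offset-to-(page, offset) translation; spelled out, with xdr and xdr_off assumed from the caller:

	struct page *page = xdr->pages[xdr_off >> PAGE_SHIFT];
	unsigned int page_off = xdr_off & ~PAGE_MASK;	/* offset within the page */
	char *addr = page_address(page) + page_off;
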
rpc_rdma.c
92 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
351 ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
430 * into pages; otherwise use reply chunks.
635 ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
svc_rdma_transport.c
146 put_page(ctxt->pages[i]);
518 ctxt->pages[sge_no] = page;
1324 ctxt->pages[0] = p;
verbs.c
2048 unsigned int inline_size, pages; local
2053 pages = inline_size / sizeof(struct rpcrdma_segment);
2054 return pages << PAGE_SHIFT;
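
Worked example: assuming 4 KiB pages and the 16-byte struct rpcrdma_segment, 1024 bytes of inline space holds 1024 / 16 = 64 segments, so the function reports 64 << PAGE_SHIFT = 256 KiB of mappable payload.
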
/net/sunrpc/
xdr.c
125 kaddr = kmap_atomic(buf->pages[0]);
133 struct page **pages, unsigned int base, unsigned int len)
142 xdr->pages = pages;
159 * @pages: vector of pages containing both the source and dest memory area.
166 * if a memory area starts at byte 'base' in page 'pages[i]',
172 _shift_data_right_pages(struct page **pages, size_t pgto_base, argument
184 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
185 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
132 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, struct page **pages, unsigned int base, unsigned int len) argument
233 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) argument
277 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) argument
690 xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len) argument
813 xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len) argument
[all...]
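
A sketch of xdr_inline_pages() in use, pointing a reply buffer's page list at preallocated pages before decoding (req, hdr_len, pages, and count are all illustrative names):

	/* head kvec keeps the first hdr_len bytes; the next 'count'
	 * bytes of the reply land in 'pages' */
	xdr_inline_pages(&req->rq_rcv_buf, hdr_len, pages, 0 /* base */, count);
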
svc.c
565 * We allocate pages and place them in rq_argpages.
570 unsigned int pages, arghi; local
576 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
580 WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
581 if (pages > RPCSVC_MAXPAGES)
582 pages = RPCSVC_MAXPAGES;
583 while (pages) {
588 pages--;
590 return pages == 0;
1309 rqstp->rq_res.pages = rqstp->rq_respages + 1;
[all...]
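
The sizing rule above, restated as a sketch (rqstp and size assumed; the real function also warns when the cap is hit):

	unsigned int pages, arghi = 0;

	pages = size / PAGE_SIZE + 1;	/* +1: buffer holds request and reply */
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);

		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;	/* true only if every page was allocated */
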
svc_xprt.c
586 int pages; local
589 /* now allocate needed pages. If we get a failure, sleep briefly */
590 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
591 WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
592 if (pages >= RPCSVC_MAXPAGES)
593 /* use as many pages as possible */
594 pages = RPCSVC_MAXPAGES - 1;
595 for (i = 0; i < pages ; i++)
611 /* Make arg->head point to first page and arg->pages point to rest */
615 arg->pages = rqstp->rq_pages + 1;
[all...]
socklib.c
74 struct page **ppage = xdr->pages;
105 /* ACL likes to be lazy in allocating pages - ACLs
svcsock.c
181 struct page **ppage = xdr->pages;
1055 static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) argument
1061 vec[i].iov_base = page_address(pages[i]);
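
A hedged reconstruction of copy_pages_to_kvecs() from the two lines shown: one kvec entry per page until len is covered (whether the real code clamps the final entry is not visible here):

	static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
	{
		int i = 0;

		while (len > 0) {
			vec[i].iov_base = page_address(pages[i]);
			vec[i].iov_len = PAGE_SIZE;
			len -= PAGE_SIZE;
			i++;
		}
		return i;	/* number of entries filled */
	}
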
/net/sunrpc/auth_gss/
gss_krb5_wrap.c
85 ptr = kmap_atomic(buf->pages[last]);
150 * The pages, however, may be real pages in the page cache and we replace
151 * them with scratch pages from **pages before writing to them. */
159 struct xdr_buf *buf, struct page **pages)
222 tmp_pages = buf->pages;
223 buf->pages = pages;
227 buf->pages
158 gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf, struct page **pages) argument
440 gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages) argument
591 gss_wrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf, struct page **pages) argument
[all...]
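
The comment about scratch pages describes a swap-and-restore trick; schematically (tmp_pages is a local, and the restore point is an assumption):

	struct page **tmp_pages;

	tmp_pages = buf->pages;	/* remember the real page-cache pages */
	buf->pages = pages;	/* encrypt into the caller's scratch pages */
	/* ... emit the wrap token through buf ... */
	buf->pages = tmp_pages;	/* put the originals back */
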
gss_krb5_crypto.c
393 struct page **pages; member in struct:encryptor_desc
416 /* pages are not in place: */
418 in_page = desc->pages[i];
463 int offset, struct page **pages)
476 desc.pages = pages;
598 u32 offset, u8 *iv, struct page **pages, int encrypt)
614 * page cache pages, and write the encrypted data to
615 * the supplied xdr_buf pages.
617 save_pages = buf->pages;
462 gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, int offset, struct page **pages) argument
597 gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf, u32 offset, u8 *iv, struct page **pages, int encrypt) argument
643 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages) argument
[all...]
gss_rpc_xdr.h
135 * as a set of pages */
137 struct page **pages; /* Array of contiguous pages */ member in struct:gssp_in_token
150 struct page **pages; member in struct:gssx_arg_accept_sec_context
245 /* grouplist not included; we allocate separate pages for that: */
gss_rpc_upcall.c
218 for (i = 0; i < arg->npages && arg->pages[i]; i++)
219 __free_page(arg->pages[i]);
225 arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
227 * XXX: actual pages are allocated by xdr layer in
230 if (!arg->pages)
gss_rpc_xdr.c
68 /* all we need to do is to write pages */
69 xdr_write_pages(xdr, in->pages, in->page_base, in->page_len);
784 arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
/net/core/
iovec.c
166 int pages = 0, len, size; local
178 pages += size;
182 return pages;
skbuff.c
802 * to userspace pages.
1478 * size of pulled pages. Superb.
1483 /* Estimate size of pulled pages. */
1667 * Callback from splice_to_pipe(), if we need to release some pages
1672 put_page(spd->pages[i]);
1699 spd->pages[spd->nr_pages - 1] == page &&
1705 * Fill page/offset/length into spd, if it can hold more pages.
1726 spd->pages[spd->nr_pages] = page;
1818 struct page *pages[MAX_SKB_FRAGS]; local
1820 .pages = pages,
[all...]
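
The three-part test excerpted above is a coalescing check; paraphrased as a helper (fields as in struct splice_pipe_desc from <linux/splice.h>):

	/* can the new fragment extend the previous spd slot in place? */
	static bool can_coalesce(const struct splice_pipe_desc *spd,
				 struct page *page, unsigned int offset)
	{
		return spd->nr_pages &&
		       spd->pages[spd->nr_pages - 1] == page &&
		       (spd->partial[spd->nr_pages - 1].offset +
			spd->partial[spd->nr_pages - 1].len == offset);
	}
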
/net/ieee802154/
nl-phy.c
42 int i, pages = 0; local
62 buf[pages++] = phy->channels_supported[i] | (i << 27);
64 if (pages &&
66 pages * sizeof(uint32_t), buf))
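
The packing above follows the IEEE 802.15.4 layout: at most 27 channels per channel page, so the low 27 bits of each u32 carry the channel bitmap and the top 5 bits the page index. Decoding one entry:

	u8  page     = buf[k] >> 27;		/* top 5 bits: page index */
	u32 channels = buf[k] & 0x07ffffff;	/* low 27 bits: channel bitmap */
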
