/net/ceph/

pagelist.c
     11  struct page *page = list_entry(pl->head.prev, struct page, lru);   [local]
     12  kunmap(page);
     23  struct page *page = list_first_entry(&pl->head, struct page,   [local]
     25  list_del(&page->lru);
     26  __free_page(page);
     35  struct page *pag...   [local]
     89  struct page *page = __page_cache_alloc(GFP_NOFS);   [local]
    103  struct page *page = list_first_entry(&pl->free_list,   [local]
    132  struct page *page;   [local]
    [further matches truncated]

crypto.c
    138  struct page *page;   [local]
    142  page = vmalloc_to_page(buf);
    144  page = virt_to_page(buf);
    146  sg_set_page(sg, page, len, off);
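
The crypto.c hits show the standard test for whether a buffer lives in the vmalloc area before it is turned into a scatterlist entry. Below is a minimal sketch of that pattern; setup_sg_from_buf is a hypothetical helper name, not a function from crypto.c, and it assumes the buffer does not cross a page boundary.

    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/vmalloc.h>

    /*
     * Point a single scatterlist entry at an arbitrary kernel buffer.
     * vmalloc memory is not physically contiguous, so its struct page
     * must be looked up with vmalloc_to_page(); lowmem buffers can use
     * virt_to_page() directly.
     */
    static void setup_sg_from_buf(struct scatterlist *sg, void *buf,
                                  unsigned int len)
    {
            struct page *page;
            unsigned int off = offset_in_page(buf);

            if (is_vmalloc_addr(buf))
                    page = vmalloc_to_page(buf);
            else
                    page = virt_to_page(buf);

            sg_init_table(sg, 1);
            sg_set_page(sg, page, len, off);
    }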

messenger.c
    193  static struct page *zero_page; /* used in certain error cases */
    531  static int ceph_tcp_recvpage(struct socket *sock, struct page *page,   [argument]
    539  kaddr = kmap(page);
    542  kunmap(page);
    568  static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,   [argument]
    574  ret = kernel_sendpage(sock, page, offset, size, flags);
    581  static int ceph_tcp_sendpage(struct socket *sock, struct page *page,   [argument]
    970  struct page *page;   [local]
   1106  struct page *page;   [local]
   1480  ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset, unsigned int length)   [argument]
   1522  struct page *page;   [local]
   2206  struct page *page;   [local]
    [further matches truncated]
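
ceph_tcp_recvpage() and ceph_tcp_sendpage() let the messenger move page contents over the socket without a bounce buffer: receive by kmap'ing the page and handing the mapping to kernel_recvmsg(), send via kernel_sendpage(). A rough sketch of the receive side follows; recv_into_page is an illustrative name and the msghdr/kvec setup is the generic pattern, not copied from messenger.c.

    #include <linux/highmem.h>
    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    /*
     * Receive up to @len bytes from @sock directly into @page at @off.
     * kmap() provides a kernel mapping for the page (a temporary one on
     * highmem configurations, a plain address computation otherwise).
     */
    static int recv_into_page(struct socket *sock, struct page *page,
                              int off, size_t len)
    {
            struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
            struct kvec iov;
            void *kaddr = kmap(page);
            int ret;

            iov.iov_base = kaddr + off;
            iov.iov_len = len;
            ret = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
            kunmap(page);
            return ret;
    }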

/net/mac802154/

monitor.c
     41  u8 chan, page;   [local]
     47  page = priv->hw->phy->current_page;
     52  if (WARN_ON(page >= WPAN_NUM_PAGES) ||
     60  return mac802154_tx(priv->hw, skb, page, chan);
    116  priv->page = 0;

tx.c
     42  u8 page;   [member of struct xmit_work]
     53  xw->priv->phy->current_page != xw->page) {
     55  xw->page,
     63  xw->priv->phy->current_page = xw->page;
     85  u8 page, u8 chan)
     90  if (!(priv->phy->channels_supported[page] & (1 << chan))) {
    123  work->page = page;
     84  mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, u8 page, u8 chan)   [argument]
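
mac802154_tx() refuses frames whose page/channel pair the PHY cannot use, and the xmit worker only reprograms the radio when the requested page or channel differs from the phy's current ones. A simplified sketch of the validation step; struct example_phy and example_channel_ok are illustrative stand-ins for the real wpan_phy fields, and the limits are redefined locally (mirroring WPAN_NUM_PAGES / WPAN_NUM_CHANNELS) to keep the example self-contained.

    #include <linux/types.h>

    /*
     * IEEE 802.15.4 groups channels into "pages"; each page has a 32-bit
     * mask of channels the PHY supports.
     */
    #define EXAMPLE_WPAN_PAGES     32
    #define EXAMPLE_WPAN_CHANNELS  27

    struct example_phy {
            u32 channels_supported[EXAMPLE_WPAN_PAGES];
            u8  current_page;
            u8  current_channel;
    };

    /*
     * Return true if (page, chan) is in range and the PHY advertises
     * support for it, mirroring the bounds + bitmask test in tx.c.
     */
    static bool example_channel_ok(const struct example_phy *phy,
                                   u8 page, u8 chan)
    {
            if (page >= EXAMPLE_WPAN_PAGES || chan >= EXAMPLE_WPAN_CHANNELS)
                    return false;
            return phy->channels_supported[page] & (1 << chan);
    }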

mac_cmd.c
     38  u8 channel, u8 page,
     51  mac802154_dev_set_page_channel(dev, page, channel);
     36  mac802154_mlme_start_req(struct net_device *dev, struct ieee802154_addr *addr, u8 channel, u8 page, u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, u8 coord_realign)   [argument]

mac802154.h
     89  u8 page;   [member of struct mac802154_sub_if_data]
    123  u8 page, u8 chan);
    131  void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);

mib.c
    177  res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
    182  priv->hw->phy->current_page = priv->page;
    189  void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)   [argument]
    197  priv->page = page;
    203  priv->hw->phy->current_page != priv->page) {

wpan.c
    318  u8 chan, page;   [local]
    325  page = priv->page;
    329  page >= WPAN_NUM_PAGES ||
    346  return mac802154_tx(priv->hw, skb, page, chan);
    395  priv->page = 0;

/net/rds/

page.c
     41  struct page *r_page;
     56  int rds_page_copy_user(struct page *page, unsigned long offset,   [argument]
     63  addr = kmap(page);
     71  kunmap(page);
     87  * If @bytes is at least a full page then this just returns a page from
     90  * If @bytes is a partial page then this stores the unused region of the
     91  * page in a per-cpu structure. Future partial-page allocation
    102  struct page *page;   [local]
    [further matches truncated]
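
rds_page_copy_user() maps the page and copies a sub-range to or from user space. A minimal sketch of the to-user direction, assuming the caller holds a reference on the page; example_page_to_user is an illustrative name, not the RDS function.

    #include <linux/errno.h>
    #include <linux/highmem.h>
    #include <linux/uaccess.h>

    /*
     * Copy @bytes from @page starting at @offset out to @uptr.
     * Returns 0 on success or -EFAULT if the user copy faults.
     */
    static int example_page_to_user(struct page *page, unsigned long offset,
                                    void __user *uptr, size_t bytes)
    {
            void *addr;
            int ret = 0;

            addr = kmap(page);
            if (copy_to_user(uptr, addr + offset, bytes))
                    ret = -EFAULT;
            kunmap(page);
            return ret;
    }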

rdma.c
     49  * get the number of pages by looking at the page indices that the start and
    158  struct page **pages, int write)
    178  struct page **pages = NULL;
    207  pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
    232  * Pin the pages that make up the user buffer and transfer the page
    234  * the whole region after transferring the partial page references
    235  * to the sg array so that we can have one page ref cleanup path.
    239  * the zero page.
    281  * map page aligned regions. So we keep the offset, and build
    448  struct page *pag...   [local]
    467  struct page *page = sg_page(ao->op_sg);   [local]
    770  struct page *page = NULL;   [local]
    [further matches truncated]

ib_rdma.c
    484  struct page *page = sg_page(&ibmr->sg[i]);   [local]
    489  set_page_dirty(page);
    490  put_page(page);
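
rdma.c pins the pages behind a user buffer into a kcalloc'ed array before building the scatter list, and ib_rdma.c shows the matching teardown: dirty each page (if it may have been written) and drop the reference. A compressed sketch of that pairing, using the older four-argument get_user_pages_fast() convention of this kernel generation; the example_* names are illustrative, not RDS functions.

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /*
     * Pin the user range [addr, addr + len) and return the page array.
     * @write selects whether the pages may be written (and thus must be
     * marked dirty before release).
     */
    static struct page **example_pin_user_pages(unsigned long addr, size_t len,
                                                int write, int *out_nr)
    {
            unsigned long first = addr >> PAGE_SHIFT;
            unsigned long last = (addr + len - 1) >> PAGE_SHIFT;
            int nr_pages = last - first + 1;
            struct page **pages;
            int ret;

            pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return ERR_PTR(-ENOMEM);

            ret = get_user_pages_fast(addr, nr_pages, write, pages);
            if (ret < nr_pages) {
                    /* partial pin: undo what we got and fail */
                    while (ret > 0)
                            put_page(pages[--ret]);
                    kfree(pages);
                    return ERR_PTR(-EFAULT);
            }
            *out_nr = nr_pages;
            return pages;
    }

    /* Release previously pinned pages, dirtying them if they were written. */
    static void example_unpin_user_pages(struct page **pages, int nr, int write)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    if (write)
                            set_page_dirty(pages[i]);
                    put_page(pages[i]);
            }
            kfree(pages);
    }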

/net/xfrm/

xfrm_ipcomp.c
     73  struct page *page;   [local]
     80  page = alloc_page(GFP_ATOMIC);
     83  if (!page)
     86  __skb_frag_set_page(frag, page);
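
These hits are from the decompression path, which grows the skb with freshly allocated pages when the inflated payload overflows the linear area. A reduced sketch of appending one GFP_ATOMIC page as a new fragment; it uses skb_fill_page_desc() for brevity where the original manipulates the frag directly, and example_add_frag_page is not a function from xfrm_ipcomp.c.

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    /*
     * Append one page holding @len bytes of payload to @skb as a new
     * fragment.  GFP_ATOMIC because this runs from the receive path.
     */
    static int example_add_frag_page(struct sk_buff *skb, unsigned int len)
    {
            int i = skb_shinfo(skb)->nr_frags;
            struct page *page;

            if (i >= MAX_SKB_FRAGS)
                    return -EMSGSIZE;

            page = alloc_page(GFP_ATOMIC);
            if (!page)
                    return -ENOMEM;

            skb_fill_page_desc(skb, i, page, 0, len);
            skb->len += len;
            skb->data_len += len;
            skb->truesize += PAGE_SIZE;
            return 0;
    }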

/net/atm/

mpoa_proc.c
    209  char *page, *p;   [local]
    218  page = (char *)__get_free_page(GFP_KERNEL);
    219  if (!page)
    222  for (p = page, len = 0; len < nbytes; p++, len++) {
    224  free_page((unsigned long)page);
    233  if (!parse_qos(page))
    234  printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
    236  free_page((unsigned long)page);

proc.c
    380  unsigned long page;   [local]
    385  page = get_zeroed_page(GFP_KERNEL);
    386  if (!page)
    392  length = dev->ops->proc_read(dev, pos, (char *)page);
    397  if (copy_to_user(buf, (char *)page, length))
    401  free_page(page);
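
Both atm files use a single free page as a scratch buffer: mpoa_proc.c copies a user write into one page before parsing it, and proc.c formats device output into a zeroed page and copies it to user space. A sketch of the read side under assumed names (example_proc_read and the fill callback are placeholders, not the atm proc handlers):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /*
     * Format at most one page of text via @fill and copy it to user space.
     * Returns the number of bytes copied or a negative errno.
     */
    static ssize_t example_proc_read(char __user *buf, size_t count,
                                     int (*fill)(char *page))
    {
            unsigned long page;
            ssize_t ret;
            int length;

            page = get_zeroed_page(GFP_KERNEL);
            if (!page)
                    return -ENOMEM;

            length = fill((char *)page);
            if (length < 0)
                    ret = length;               /* fill callback failed */
            else if (length > count)
                    ret = -EINVAL;              /* caller's buffer too small */
            else if (copy_to_user(buf, (char *)page, length))
                    ret = -EFAULT;
            else
                    ret = length;

            free_page(page);
            return ret;
    }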

/net/sunrpc/xprtrdma/

svc_rdma_recvfrom.c
     63  struct page *page;   [local]
     67  /* Swap the page in the SGE with the page in argpages */
     68  page = ctxt->pages[0];
     70  rqstp->rq_pages[0] = page;
     73  rqstp->rq_arg.head[0].iov_base = page_address(page);
     88  page = ctxt->pages[sge_no];
     90  rqstp->rq_pages[sge_no] = page;
    109  page
    [further matches truncated]

svc_rdma_sendto.c
    110  struct page *page;   [local]
    115  page = virt_to_page(xdr->head[0].iov_base);
    119  /* This offset is in the page list */
    121  page = xdr->pages[xdr_off >> PAGE_SHIFT];
    128  page = virt_to_page(xdr->tail[0].iov_base);
    131  dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
    368  struct page *page,   [argument]
    393  ctxt->pages[0] = page;
    366  send_reply(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct page *page, struct rpcrdma_msg *rdma_resp, struct svc_rdma_op_ctxt *ctxt, struct svc_rdma_req_map *vec, int byte_count)   [argument]
    [further matches truncated]
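
svc_rdma_sendto.c has to find the struct page behind an arbitrary byte offset of an xdr_buf before it can call ib_dma_map_page(): head and tail live in kmalloc'ed memory (virt_to_page), while the middle comes from the page vector. A hedged sketch of just that lookup, assuming head and tail each fit within one page and that the offset has been validated against the buffer length; example_xdr_offset_to_page is not a helper from the file, and the DMA mapping call that would follow is omitted.

    #include <linux/mm.h>
    #include <linux/sunrpc/xdr.h>

    /*
     * Translate a byte offset within an xdr_buf into the backing page
     * and the offset inside that page.  Head, page list and tail are
     * checked in that order, matching the xdr_buf layout.
     */
    static struct page *example_xdr_offset_to_page(struct xdr_buf *xdr,
                                                   u32 xdr_off, u32 *page_off)
    {
            if (xdr_off < xdr->head[0].iov_len) {
                    /* within the kmalloc'ed head buffer */
                    *page_off = offset_in_page(xdr->head[0].iov_base) + xdr_off;
                    return virt_to_page(xdr->head[0].iov_base);
            }
            xdr_off -= xdr->head[0].iov_len;

            if (xdr_off < xdr->page_len) {
                    /* within the page vector */
                    xdr_off += xdr->page_base;
                    *page_off = xdr_off & ~PAGE_MASK;
                    return xdr->pages[xdr_off >> PAGE_SHIFT];
            }
            xdr_off -= xdr->page_len;

            /* otherwise it falls in the kmalloc'ed tail buffer */
            *page_off = offset_in_page(xdr->tail[0].iov_base) + xdr_off;
            return virt_to_page(xdr->tail[0].iov_base);
    }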

svc_rdma_transport.c
    490  struct page *svc_rdma_get_page(void)
    492  struct page *page;   [local]
    494  while ((page = alloc_page(GFP_KERNEL)) == NULL) {
    499  return page;
    506  struct page *page;   [local]
    517  page = svc_rdma_get_page();
    518  ctxt->pages[sge_no] = page;
    520  page,
    [further matches truncated]

/net/core/

datagram.c
    351  struct page *page = skb_frag_page(frag);   [local]
    355  vaddr = kmap(page);
    358  kunmap(page);
    437  struct page *page = skb_frag_page(frag);   [local]
    441  vaddr = kmap(page);
    444  kunmap(page);
    527  struct page *page   [local]
    607  struct page *page[MAX_SKB_FRAGS];   [local]
    684  struct page *page = skb_frag_page(frag);   [local]
    [further matches truncated]
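
The datagram.c matches are the per-fragment legs of the copy loops: each paged fragment is kmap'ed, copied, and unmapped. A minimal sketch of pulling one fragment's bytes into a flat kernel buffer; the iovec bookkeeping of the real functions is left out, and the direct frag->page_offset access reflects the skb_frag_t layout of this kernel generation.

    #include <linux/highmem.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /*
     * Copy @len bytes out of fragment @i of @skb, starting @off bytes
     * into the fragment, into @to.  Caller must ensure off + len stays
     * within the fragment size.
     */
    static void example_copy_frag(const struct sk_buff *skb, int i,
                                  unsigned int off, void *to, unsigned int len)
    {
            const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            struct page *page = skb_frag_page(frag);
            void *vaddr = kmap(page);

            memcpy(to, vaddr + frag->page_offset + off, len);
            kunmap(page);
    }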

sock.c
   1826  if (pfrag->page) {
   1827  if (atomic_read(&pfrag->page->_count) == 1) {
   1833  put_page(pfrag->page);
   1838  pfrag->page = alloc_pages(gfp | __GFP_COMP |
   1841  if (likely(pfrag->page)) {
   1846  pfrag->page = alloc_page(gfp);
   1847  if (likely(pfrag->page)) {
   2150  ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)   [argument]
   2155  char *kaddr = kmap(page);
    [further matches truncated]
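
The sock.c hits are the per-socket page-fragment refill: if the cached page has no other users it is recycled from offset 0, otherwise it is released and a new page (preferably high-order) is allocated. A simplified, order-0-only sketch; struct example_frag mirrors the kernel's struct page_frag but is spelled out here to keep the example self-contained, and page_count() stands in for the raw _count read in the snippet.

    #include <linux/gfp.h>
    #include <linux/mm.h>

    struct example_frag {
            struct page *page;
            unsigned int offset;
            unsigned int size;
    };

    /*
     * Make sure @frag has at least @needed bytes of unused space,
     * reusing the current page when we hold the only reference.
     */
    static bool example_frag_refill(struct example_frag *frag,
                                    unsigned int needed, gfp_t gfp)
    {
            if (frag->page) {
                    if (page_count(frag->page) == 1) {
                            /* sole owner: recycle from the start */
                            frag->offset = 0;
                            if (frag->size >= needed)
                                    return true;
                    } else if (frag->size - frag->offset >= needed) {
                            /* shared, but enough room is left */
                            return true;
                    }
                    /* otherwise retire the old page and start over */
                    put_page(frag->page);
                    frag->page = NULL;
            }

            frag->page = alloc_page(gfp);
            if (!frag->page)
                    return false;
            frag->size = PAGE_SIZE;
            frag->offset = 0;
            return true;
    }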

/net/ieee802154/

nl-mac.c
    211  u32 unscanned, u8 page,
    229  nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
    362  u8 page;   [local]
    391  page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
    393  page = 0;
    397  page,
    482  u8 page;   [local]
    517  page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
    519  page = 0;
    528  ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
    209  ieee802154_nl_scan_confirm(struct net_device *dev, u8 status, u8 scan_type, u32 unscanned, u8 page, u8 *edl)   [argument]
    543  u8 page;   [local]
    [further matches truncated]

/net/ipv4/

af_inet.c
    752  ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,   [argument]
    765  return sk->sk_prot->sendpage(sk, page, offset, size, flags);
    766  return sock_no_sendpage(sock, page, offset, size, flags);

ip_output.c
    773  csum_page(struct page *page, int offset, int copy)   [argument]
    777  kaddr = kmap(page);
    779  kunmap(page);
   1045  if (!skb_can_coalesce(skb, i, pfrag->page,
   1051  __skb_fill_page_desc(skb, i, pfrag->page,
   1054  get_page(pfrag->page);
   1058  page_address(pfrag->page) + pfrag->offset,
   1126  * until ip_push_pending_frames() is called. Each piece can be a page
   1127  * or non-page dat...
   1160  ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, int offset, size_t size, int flags)   [argument]
    [further matches truncated]
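
csum_page() is the small helper the corking path uses to checksum a region of a (possibly highmem) page. Reconstructing it from the visible lines is mostly mechanical; note that the csum_partial() call in the middle is inferred, not quoted from ip_output.c.

    #include <linux/highmem.h>
    #include <net/checksum.h>

    /*
     * Checksum @copy bytes of @page starting at @offset.  kmap/kunmap
     * bracket the access because the page may not have a permanent
     * kernel mapping.
     */
    static __wsum example_csum_page(struct page *page, int offset, int copy)
    {
            char *kaddr;
            __wsum csum;

            kaddr = kmap(page);
            csum = csum_partial(kaddr + offset, copy, 0);
            kunmap(page);
            return csum;
    }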

tcp.c
    895  static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,   [argument]
    945  can_coalesce = skb_can_coalesce(skb, i, page, offset);
    956  get_page(page);
    957  skb_fill_page_desc(skb, i, page, offset, copy);
   1015  int tcp_sendpage(struct sock *sk, struct page *page, int offset,   [argument]
   1022  return sock_no_sendpage(sk->sk_socket, page, offset, size,
   1026  res = do_tcp_sendpages(sk, page, offset, size, flags);
   1039  /* Small frames wont use a full page
   2988  struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);   [local]
    [further matches truncated]
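
do_tcp_sendpages() either extends the last fragment when the new bytes continue it (skb_can_coalesce) or takes its own page reference and installs a new fragment descriptor. A bare-bones sketch of that decision; example_attach_page is illustrative and assumes the caller has already sized @copy against the send window and skb limits.

    #include <linux/skbuff.h>

    /*
     * Attach @copy bytes at (@page, @offset) to @skb, merging with the
     * last fragment when possible.  Returns false if the skb is full.
     */
    static bool example_attach_page(struct sk_buff *skb, struct page *page,
                                    int offset, int copy)
    {
            int i = skb_shinfo(skb)->nr_frags;

            if (skb_can_coalesce(skb, i, page, offset)) {
                    /* bytes continue the previous fragment: just grow it */
                    skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
            } else if (i < MAX_SKB_FRAGS) {
                    /* new fragment: take our own reference on the page */
                    get_page(page);
                    skb_fill_page_desc(skb, i, page, offset, copy);
            } else {
                    return false;
            }

            skb->len += copy;
            skb->data_len += copy;
            skb->truesize += copy;
            return true;
    }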

/net/sunrpc/

xdr.c
    133  struct page **pages, unsigned int base, unsigned int len)
    160  * @pgto_base: page vector address of destination
    161  * @pgfrom_base: page vector address of source
    166  * if a memory area starts at byte 'base' in page 'pages[i]',
    172  _shift_data_right_pages(struct page **pages, size_t pgto_base,
    175  struct page **pgfrom, **pgto;
    191  /* Are any pointers crossing a page boundary? */
    225  * @pgbase: page vector address of destination
    233  _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
    235  struct page **pgt...
    501  void *page;   [local]
    [further matches truncated]
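
_copy_to_pages() scatters a flat buffer across the xdr page vector, starting mid-page when @pgbase is not page aligned and walking forward page by page. A condensed sketch of the same walk using kmap_atomic(); example_copy_to_pages is illustrative and omits the cache flushing and partial-last-page handling the real helper does.

    #include <linux/highmem.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    /*
     * Copy @len bytes from @p into the page vector @pages, starting at
     * byte offset @pgbase measured from the start of the vector.
     */
    static void example_copy_to_pages(struct page **pages, size_t pgbase,
                                      const char *p, size_t len)
    {
            struct page **pgto = pages + (pgbase >> PAGE_SHIFT);
            size_t off = pgbase & ~PAGE_MASK;

            while (len) {
                    size_t copy = min_t(size_t, PAGE_SIZE - off, len);
                    char *vto = kmap_atomic(*pgto);

                    memcpy(vto + off, p, copy);
                    kunmap_atomic(vto);

                    p += copy;
                    len -= copy;
                    off = 0;        /* later pages are filled from the top */
                    pgto++;
            }
    }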