gup.c revision 1674448345cdb56e724483a2a26622771f4e3a10
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;
bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			return NULL;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcounts on tail pages are not well-defined and
			 * shouldn't be taken. The caller should handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else
				page = NULL;
		}
		return page;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}
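
/*
 * Illustrative sketch, not part of gup.c: one way a caller could use
 * follow_page_mask() to peek at whatever page currently backs a user
 * address, without faulting anything in.  The helper name is hypothetical;
 * only find_vma(), follow_page_mask() and the FOLL_GET reference semantics
 * documented above are assumed.
 */
static struct page *peek_user_page(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page = NULL;
	unsigned int page_mask;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	/* hugetlb vmas go through follow_hugetlb_page() instead */
	if (vma && addr >= vma->vm_start && !is_vm_hugetlb_page(vma)) {
		page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
		if (IS_ERR(page))	/* mapped, but not by a page descriptor */
			page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;	/* NULL if nothing is mapped; caller must put_page() */
}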

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mlock, just skip the stack guard page. */
	if ((*flags & FOLL_MLOCK) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i;
	unsigned long vm_flags;
	unsigned int page_mask;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(mm, start)) {
			int ret;
			ret = get_gate_page(mm, start & PAGE_MASK, gup_flags,
					&vma, pages ? &pages[i] : NULL);
			if (ret)
				goto efault;
			page_mask = 0;
			goto next_page;
		}

		if (!vma)
			goto efault;
		vm_flags = vma->vm_flags;
		if (vm_flags & (VM_IO | VM_PFNMAP))
			goto efault;

		if (gup_flags & FOLL_WRITE) {
			if (!(vm_flags & VM_WRITE)) {
				if (!(gup_flags & FOLL_FORCE))
					goto efault;
				/*
				 * We used to let the write,force case do COW
				 * in a VM_MAYWRITE VM_SHARED !VM_WRITE vma, so
				 * ptrace could set a breakpoint in a read-only
				 * mapping of an executable, without corrupting
				 * the file (yet only when that file had been
				 * opened for writing!).  Anon pages in shared
				 * mappings are surprising: now just reject it.
				 */
				if (!is_cow_mapping(vm_flags)) {
					WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
					goto efault;
				}
			}
		} else {
			if (!(vm_flags & VM_READ)) {
				if (!(gup_flags & FOLL_FORCE))
					goto efault;
				/*
				 * Is there actually any vma we can reach here
				 * which does not have VM_MAYREAD set?
				 */
				if (!(vm_flags & VM_MAYREAD))
					goto efault;
			}
		}

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;
			unsigned int page_increm;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page_mask(vma, start,
						foll_flags, &page_mask))) {
				int ret;
				ret = faultin_page(tsk, vma, start, &foll_flags,
						nonblocking);
				switch (ret) {
				case 0:
					break;
				case -EFAULT:
				case -ENOMEM:
				case -EHWPOISON:
					return i ? i : ret;
				case -EBUSY:
					return i;
				case -ENOENT:
					goto next_page;
				default:
					BUG();
				}
				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
				page_mask = 0;
			}
next_page:
			if (vmas) {
				vmas[i] = vma;
				page_mask = 0;
			}
			page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
			if (page_increm > nr_pages)
				page_increm = nr_pages;
			i += page_increm;
			start += page_increm * PAGE_SIZE;
			nr_pages -= page_increm;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
efault:
	return i ? : -EFAULT;
}
EXPORT_SYMBOL(__get_user_pages);
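
/*
 * Illustrative sketch, not part of gup.c: the @nonblocking protocol in
 * practice.  If the fault path had to drop mmap_sem (VM_FAULT_RETRY),
 * __get_user_pages() clears *@nonblocking and stops early, so the caller
 * must check the flag before unlocking again.  The helper name and the
 * particular FOLL_ flags are only for the example; @pages is assumed
 * non-NULL to match the FOLL_GET sanity check above.
 */
static long pin_user_pages_sketch(struct task_struct *tsk,
				  struct mm_struct *mm, unsigned long start,
				  unsigned long nr_pages, struct page **pages)
{
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(tsk, mm, start, nr_pages,
			       FOLL_TOUCH | FOLL_GET, pages, NULL, &locked);
	if (locked)
		up_read(&mm->mmap_sem);	/* not dropped by the fault path */
	return ret;
}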

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:	flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section), the access returns -EFAULT, and we want
 * to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This should be called with the mmap_sem held for read.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
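
/*
 * Illustrative sketch, not part of gup.c, modelled on the futex
 * fault_in_user_writeable() pattern referred to above: the atomic access
 * fails with -EFAULT under pagefault_disable(), the fault is resolved
 * explicitly, and the access is retried.  The helper name is hypothetical,
 * <linux/uaccess.h> is assumed, and @uaddr is assumed to have passed
 * access_ok() already.
 */
static int put_user_u32_sketch(u32 __user *uaddr, u32 val)
{
	struct mm_struct *mm = current->mm;
	int ret;

	pagefault_disable();
	ret = __put_user(val, uaddr);	/* may fault: returns -EFAULT */
	pagefault_enable();
	if (!ret)
		return 0;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);
	if (ret)
		return ret;

	return put_user(val, uaddr);	/* retry now that the PTE is fixed up */
}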

/**
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
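
/*
 * Illustrative sketch, not part of gup.c: the pin / use / release sequence
 * described in the comment above, for a read-only access.  The helper name
 * is hypothetical and <linux/highmem.h> is assumed for kmap(); if the page
 * were written to (write=1), set_page_dirty_lock() would have to be called
 * before put_page().
 */
static int read_user_page_sketch(unsigned long uaddr, void *buf)
{
	struct mm_struct *mm = current->mm;
	struct page *page;
	void *kaddr;
	long got;

	down_read(&mm->mmap_sem);
	got = get_user_pages(current, mm, uaddr & PAGE_MASK, 1,
			     0 /* write */, 0 /* force */, &page, NULL);
	up_read(&mm->mmap_sem);
	if (got < 1)
		return got < 0 ? got : -EFAULT;

	kaddr = kmap(page);
	memcpy(buf, kaddr, PAGE_SIZE);
	kunmap(page);

	put_page(page);		/* drop the reference taken by get_user_pages */
	return 0;
}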

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
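
/*
 * Illustrative sketch, not part of gup.c: how the ELF core dumper in
 * fs/binfmt_elf.c consumes get_dump_page().  A NULL return becomes a hole
 * (dump_skip) rather than an error.  dump_emit()/dump_skip() and
 * struct coredump_params come from <linux/coredump.h>, and kmap() from
 * <linux/highmem.h>, neither of which this file includes; the helper name
 * is hypothetical.
 */
static int dump_one_page_sketch(struct coredump_params *cprm, unsigned long addr)
{
	struct page *page = get_dump_page(addr);
	int stop;

	if (page) {
		void *kaddr = kmap(page);

		stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
		kunmap(page);
		page_cache_release(page);
	} else {
		stop = !dump_skip(cprm, PAGE_SIZE);
	}
	return stop;	/* non-zero means the dump should be aborted */
}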
#endif /* CONFIG_ELF_CORE */