bootmem.c revision 72d7c3b33c980843e756681fb4867dc1efd62a76
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})
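
/*
 * Example (illustrative, not part of this file): booting with
 * "bootmem_debug" on the kernel command line turns the bdebug() calls
 * below into KERN_INFO lines such as
 *
 *	bootmem::free_all_bootmem_core nid=0 start=0 end=7fde aligned=1
 *
 * where the values are hypothetical and the format comes from each
 * call site.
 */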

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
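
/*
 * Worked example (assumed 64-bit, 4 KiB pages): representing 131072
 * pages (512 MiB) takes (131072 + 7) / 8 = 16384 bitmap bytes, which
 * is already a multiple of sizeof(long), so bootmem_bootmap_pages()
 * returns PAGE_ALIGN(16384) >> PAGE_SHIFT = 4 pages.
 */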

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_min_pfn < ent->node_min_pfn)
			break;
	}
	list_add_tail(&bdata->list, iter);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
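
/*
 * Usage sketch (hypothetical NUMA arch code, not part of this file):
 * setup_arch() typically places the bitmap somewhere safe, registers
 * the node, opens up usable RAM and then protects the bitmap itself:
 *
 *	bootmap_size = init_bootmem_node(NODE_DATA(nid), map_pfn,
 *					 start_pfn, end_pfn);
 *	free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start_pfn),
 *			  (end_pfn - start_pfn) << PAGE_SHIFT);
 *	reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(map_pfn),
 *			     bootmap_size, BOOTMEM_DEFAULT);
 *
 * map_pfn, start_pfn and end_pfn stand in for whatever the
 * architecture derived from its memory map.
 */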

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
#endif

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
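
/*
 * Usage sketch (hypothetical, not part of this file): code that kept a
 * large early buffer around past free_all_bootmem() can still return
 * it to the buddy allocator:
 *
 *	free_bootmem_late(early_buf_phys, early_buf_size);
 *
 * early_buf_phys/early_buf_size are placeholder names; only the fully
 * contained pages are freed, due to the PFN_UP/PFN_DOWN rounding
 * above.
 */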

#ifdef CONFIG_NO_BOOTMEM
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
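
/*
 * Worked example (assumed BITS_PER_LONG == 64): for start = 5 and
 * end = 200, start_aligned = 64 and end_aligned = 192, so pfns 5-63
 * and 192-199 go back as single pages while pfns 64-191 are handed
 * over as two order-6 (64-page) blocks, which is much cheaper for the
 * buddy allocator.
 */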

unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}
#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in blocks of that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
#endif
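
/*
 * Worked example for the bitmap scan above (assumed BITS_PER_LONG ==
 * 32): a map word of 0xfffffff0 gives vec = 0x0000000f, so only the
 * four lowest pfns of that 32-pfn window are free and they are
 * released one by one; a map word of 0 gives vec == ~0UL and, when
 * start is word-aligned, the whole window goes back as one order-5
 * block.
 */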

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
#ifdef CONFIG_NO_BOOTMEM
	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
#else
	return free_all_bootmem_core(pgdat->bdata);
#endif
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
#ifdef CONFIG_NO_BOOTMEM
	/*
	 * Pass MAX_NUMNODES instead of NODE_DATA(0)->node_id: if node 0
	 * has no RAM installed, low memory may live on node 1 instead.
	 * Using MAX_NUMNODES makes sure every range in early_node_map[]
	 * is considered, not just the ranges belonging to node 0.
	 */
	return free_all_memory_core_early(MAX_NUMNODES);
#else
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	return total_pages;
#endif
}

#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}
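
/*
 * Behaviour sketch (hypothetical call, not from this file): reserving
 * with BOOTMEM_EXCLUSIVE rolls back the bits set so far and fails on
 * overlap, while the default flags merely log a double reservation:
 *
 *	if (reserve_bootmem(addr, size, BOOTMEM_EXCLUSIVE) == -EBUSY)
 *		printk(KERN_WARNING "region at %lx already taken\n", addr);
 *
 * Callers that must not share a range (crash kernel reservation, for
 * example) rely on this to detect conflicts.
 */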

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
#endif
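
/*
 * Walk-through of mark_bootmem() (assumed layout, not from this file):
 * with node 0 covering pfns 0-0xfff and node 1 covering 0x1000-0x1fff,
 * mark_bootmem(0xf00, 0x1100, 1, 0) reserves 0xf00-0xfff on node 0's
 * bitmap, advances pos to node 0's node_low_pfn, and finishes with
 * 0x1000-0x10ff on node 1.  If a later node fails an exclusive
 * reservation, the part already done is rolled back via
 * mark_bootmem(start, pos, 0, 0).
 */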

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	kmemleak_free_part(__va(physaddr), size);
	free_early(physaddr, physaddr + size);
#else
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
#endif
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	kmemleak_free_part(__va(addr), size);
	free_early(addr, addr + size);
#else
	unsigned long start, end;

	kmemleak_free_part(__va(addr), size);

	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	mark_bootmem(start, end, 0, 0);
#endif
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
#endif
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
#endif
}
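
/*
 * Usage sketch (hypothetical early-boot code, not part of this file):
 *
 *	reserve_bootmem(initrd_start_phys, initrd_size, BOOTMEM_DEFAULT);
 *	... unpack the initrd ...
 *	free_bootmem(initrd_start_phys, initrd_size);
 *
 * Note the asymmetric rounding: reserve_bootmem() widens to whole
 * pages (PFN_DOWN/PFN_UP) while free_bootmem() shrinks (PFN_UP/
 * PFN_DOWN), so partially covered pages always stay reserved.
 */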

#ifndef CONFIG_NO_BOOTMEM
int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}

static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}
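
/*
 * Worked example (assumed values): with node_min_pfn = 5 and a step of
 * 4 pfns, align_idx(bdata, 0, 4) returns ALIGN(5 + 0, 4) - 5 = 3, i.e.
 * index 3 maps to pfn 8, the first pfn in this node that satisfies the
 * requested alignment.  Aligning the bare index instead would
 * mis-align nodes that do not themselves start on an aligned pfn.
 */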

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}

static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
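
/*
 * Worked example of the merge path above (assumed 4 KiB pages and a
 * page-aligned node base): a previous allocation left last_end_off at
 * 0x1200, so the page at pfn offset 1 is only partially used.  A new
 * request with align = 0x200 whose search lands on sidx ==
 * PFN_DOWN(0x1200) + 1 == 2 starts at byte offset 0x1200 instead of
 * PFN_PHYS(2); merge is then 1 and __reserve() skips that first page
 * because its bit is already set.
 */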

static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}
#endif

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
#ifdef CONFIG_NO_BOOTMEM
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
#else
	bootmem_data_t *bdata;
	void *region;

restart:
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
#endif
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem(size, align, goal, limit);
}
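
/*
 * Usage sketch (hypothetical early setup code, not part of this file):
 *
 *	unsigned long *table;
 *
 *	table = __alloc_bootmem(table_bytes, SMP_CACHE_BYTES,
 *				__pa(MAX_DMA_ADDRESS));
 *
 * The returned memory is zeroed.  table_bytes is a placeholder; a goal
 * of __pa(MAX_DMA_ADDRESS) is the conventional way callers steer early
 * allocations above the ISA DMA region.
 */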

#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	return ___alloc_bootmem(size, align, goal, limit);
}
#endif

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
	if (ptr)
		return ptr;

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
					 goal, -1ULL);
#else
	ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
#endif

	return ptr;
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update the goal according to MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	/* 128UL << (20 - PAGE_SHIFT) is a 128 MB margin expressed in pages */
	if (end_pfn > MAX_DMA32_PFN + (128UL << (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
#ifdef CONFIG_NO_BOOTMEM
		ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						 new_goal, -1ULL);
#else
		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
						 new_goal, 0);
#endif
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
#ifdef CONFIG_NO_BOOTMEM
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
#else
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
#endif
}
#endif
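
/*
 * Worked example (assumed SECTION_SIZE_BITS == 27, PAGE_SHIFT == 12):
 * section_nr 3 spans pfns 3 << 15 through (4 << 15) - 1, so goal is
 * 0x18000000 and limit is 0x20000000, confining the allocation to the
 * physical memory of that one sparsemem section.
 */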

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						 goal, -1ULL);
#else
	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
#endif
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;
	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
#else
	ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
#endif
	return ptr;
}