bootmem.c revision 2e5237daf0cc3c8d87762f53f704dc54fa91dcf6
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})

/*
 * Given an initialised bdata, return the size in bytes of its bootmem bitmap.
 */
static unsigned long __init get_mapsize(bootmem_data_t *bdata)
{
	unsigned long mapsize;
	unsigned long start = PFN_DOWN(bdata->node_boot_start);
	unsigned long end = bdata->node_low_pfn;

	mapsize = ((end - start) + 7) / 8;
	return ALIGN(mapsize, sizeof(long));
}
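
/*
 * Illustrative arithmetic for get_mapsize(): one bit tracks one page, so a
 * node spanning 0x8000 pages (128 MiB with 4 KiB pages) needs
 * (0x8000 + 7) / 8 = 0x1000 bytes of bitmap, already a multiple of
 * sizeof(long); an odd span like 13 pages needs (13 + 7) / 8 = 2 bytes,
 * which ALIGN() rounds up to one full long (8 bytes on 64-bit).
 */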

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long mapsize;

	mapsize = (pages + 7) / 8;
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}
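
/*
 * Illustrative arithmetic for bootmem_bootmap_pages(): for pages = 0x20000
 * (512 MiB with 4 KiB pages) the raw bitmap is 0x4000 bytes; adding
 * ~PAGE_MASK (PAGE_SIZE - 1) and masking rounds that up to a whole number
 * of pages, and the shift converts bytes to pages, here 4.
 */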

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	if (list_empty(&bdata_list)) {
		list_add(&bdata->list, &bdata_list);
		return;
	}
	/* insert in order */
	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_boot_start < ent->node_boot_start) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = get_mapsize(bdata);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
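
/*
 * Typical use from an architecture's setup code, sketched with illustrative
 * names (the symbols below are examples, not definitions from this file):
 *
 *	bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
 *					 start_pfn, end_pfn);
 *	free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn));
 *	reserve_bootmem(__pa(_text), _end - _text, BOOTMEM_DEFAULT);
 *	reserve_bootmem(PFN_PHYS(free_pfn), bootmap_size, BOOTMEM_DEFAULT);
 *
 * Everything starts out reserved; the arch frees its actual RAM ranges and
 * then re-reserves what is already in use, including the bitmap itself.
 */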

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long pfn;
	unsigned long i, count;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	pfn = PFN_DOWN(bdata->node_boot_start);
	idx = bdata->node_low_pfn - pfn;
	map = bdata->node_bootmem_map;
	/*
	 * Check if we are aligned to BITS_PER_LONG pages.  If so, we might
	 * be able to free page orders of that size at once.
	 */
	if (!(pfn & (BITS_PER_LONG-1)))
		gofast = 1;

	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (gofast && v == ~0UL) {
			int order;

			page = pfn_to_page(pfn);
			count += BITS_PER_LONG;
			order = ffs(BITS_PER_LONG) - 1;
			__free_pages_bootmem(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;

			page = pfn_to_page(pfn);
			for (m = 1; m && i < idx; m <<= 1, page++, i++) {
				if (v & m) {
					count++;
					__free_pages_bootmem(page, 0);
				}
			}
		} else {
			i += BITS_PER_LONG;
		}
		pfn += BITS_PER_LONG;
	}

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
	for (i = 0; i < idx; i++, page++)
		__free_pages_bootmem(page, 0);
	count += i;
	bdata->node_bootmem_map = NULL;

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
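
/*
 * Illustrative arithmetic for the gofast path above: with BITS_PER_LONG == 64,
 * an all-zero bitmap word (v == ~0UL after inversion) covers 64 fully free,
 * naturally aligned pages, so order = ffs(64) - 1 = 6 and the whole 256 KiB
 * chunk goes to the buddy allocator as one order-6 block instead of 64
 * separate order-0 calls.
 */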

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round the start of the range up to the next page boundary;
	 * partially freed pages are considered reserved.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	/* Round the end of the range down, for the same reason. */
	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start));

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
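
/*
 * Illustrative arithmetic for the rounding above (node_boot_start == 0,
 * 4 KiB pages): freeing addr = 0x1234, size = 0x2000 gives
 * sidx = PFN_UP(0x1234) = 2 and eidx = PFN_DOWN(0x3234) = 3, so only the
 * one fully covered page (pfn 2) is freed; the partially covered pages at
 * either end stay reserved.
 */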

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}

/*
 * Mark a particular physical memory range as unallocatable: usable RAM in
 * that range might otherwise be used for boot-time allocations or get added
 * to the free page pool later on.  can_reserve_bootmem_core() only checks
 * the range, reserve_bootmem_core() actually marks it.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node, leave the others alone */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/*
	 * Convert the range to bitmap indexes; partial pages are
	 * included, so the start is rounded down and the end up.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_bit(i, bdata->node_bootmem_map)) {
			if (flags & BOOTMEM_EXCLUSIVE)
				return -EBUSY;
		}
	}

	return 0;
}

static void __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	/*
	 * Convert the range to bitmap indexes; partial pages are
	 * included, so the start is rounded down and the end up.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start),
		flags);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map))
			bdebug("hm, page %lx reserved twice.\n",
				PFN_DOWN(bdata->node_boot_start) + i);
}
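
/*
 * Illustrative arithmetic, mirroring the example at free_bootmem_core():
 * reserving addr = 0x1234, size = 0x2000 (node_boot_start == 0) gives
 * sidx = PFN_DOWN(0x1234) = 1 and eidx = PFN_UP(0x3234) = 4, so pfns 1-3
 * are all marked reserved. Reservation rounds outwards while freeing
 * rounds inwards, keeping partially covered pages reserved either way.
 */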

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	int ret;

	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	if (ret < 0)
		return -ENOMEM;
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	return 0;
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	bootmem_data_t *bdata;
	int ret;

	list_for_each_entry(bdata, &bdata_list, list) {
		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
		if (ret < 0)
			return ret;
	}
	list_for_each_entry(bdata, &bdata_list, list)
		reserve_bootmem_core(bdata, addr, size, flags);

	return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
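
/*
 * Note on the two-pass structure above: every node is checked with
 * can_reserve_bootmem_core() before any node is modified, so a
 * BOOTMEM_EXCLUSIVE conflict on a later node cannot leave earlier nodes
 * half-reserved with no way to roll back.
 */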

/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE:  This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;
	unsigned long node_boot_start;
	void *node_bootmem_map;

	if (!size) {
		printk(KERN_ERR "alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	/*
	 * Handle a node_boot_start that is not aligned to the requested
	 * alignment.  The bitmap pointer below is advanced in whole-long
	 * steps, which assumes node_boot_start is aligned to BITS_PER_LONG
	 * pages (PAGE_SHIFT + 6 = 12 + 6 address bits on x86_64).
	 */
	node_boot_start = bdata->node_boot_start;
	node_bootmem_map = bdata->node_bootmem_map;
	if (align) {
		node_boot_start = ALIGN(bdata->node_boot_start, align);
		if (node_boot_start > bdata->node_boot_start)
			node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
			    PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
	}

	if (limit && node_boot_start >= limit)
		return NULL;

	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	eidx = end_pfn - PFN_DOWN(node_boot_start);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	preferred = 0;
	if (goal && PFN_DOWN(goal) < end_pfn) {
		if (goal > node_boot_start)
			preferred = goal - node_boot_start;

		if (bdata->last_success > node_boot_start &&
			bdata->last_success - node_boot_start >= preferred)
			if (!limit || limit > bdata->last_success)
				preferred = bdata->last_success - node_boot_start;
	}

	preferred = PFN_DOWN(ALIGN(preferred, align));
	areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx;) {
		unsigned long j;

		i = find_next_zero_bit(node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, node_bootmem_map)) {
			i += incr;
			continue;
		}
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
		if (i == j)
			i += incr;
	}

	if (preferred > 0) {
		preferred = 0;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start) + node_boot_start;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		unsigned long offset, remaining_size;
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
	}

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data,
		start + PFN_DOWN(bdata->node_boot_start),
		start + areasize + PFN_DOWN(bdata->node_boot_start));

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
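
/*
 * Illustrative walk-through of the merge path above: a first allocation of
 * 0x400 bytes leaves last_offset = 0x400 within page last_pos.  A following
 * allocation of 0x200 bytes with align = 4 that lands on last_pos + 1 takes
 * offset = ALIGN(0x400, 4) = 0x400, sees remaining_size = 0xc00 >= 0x200,
 * sets areasize = 0 (no new page consumed) and returns a pointer 0x400
 * bytes into the previous allocation's last page.
 */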

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
				      unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
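
/*
 * Typical (illustrative) call, in the style of the wrappers in
 * linux/bootmem.h:
 *
 *	table = __alloc_bootmem(nr_entries * sizeof(*table),
 *				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 *
 * A goal above MAX_DMA_ADDRESS steers early allocations out of the scarce
 * DMA zone; nr_entries and table are example names, not symbols from this
 * file.
 */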

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	void *ptr;
	unsigned long limit, goal, start_nr, end_nr, pfn;
	struct pglist_data *pgdat;

	pfn = section_nr_to_pfn(section_nr);
	goal = PFN_PHYS(pfn);
	limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
	pgdat = NODE_DATA(early_pfn_to_nid(pfn));
	ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
				limit);

	if (!ptr)
		return NULL;

	start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
	end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
	if (start_nr != section_nr || end_nr != section_nr) {
		printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
		       section_nr);
		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
		ptr = NULL;
	}

	return ptr;
}
#endif

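/**
 * __alloc_bootmem_node_nopanic - node-preferred allocation without panicking
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * Like __alloc_bootmem_node(), but falls back to __alloc_bootmem_nopanic()
 * and returns NULL instead of panicking when no memory is available.
 */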
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system, but only from memory
 * below ARCH_LOW_ADDRESS_LIMIT.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Unlike __alloc_bootmem_low(), this allocates only from @pgdat and only
 * below ARCH_LOW_ADDRESS_LIMIT; it does not fall back to other nodes and
 * returns NULL instead of panicking when no memory is available.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				ARCH_LOW_ADDRESS_LIMIT);
}