memblock.c revision 348968eb151e2569ad0ebe19b2f9c3c25b5c816a
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

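/*
 * Scan 'type' and return the index of the first region that overlaps
 * [base, base + size), or -1 if there is no overlap.
 */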
long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

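/*
 * Find a 'size' bytes large, 'align'-aligned free area inside [start, end),
 * scanning downward from the top and stepping below any reserved region
 * that collides.  Returns the base of the area, or MEMBLOCK_ERROR if
 * nothing fits.
 */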
static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return MEMBLOCK_ERROR;

	base = round_down(end - size, align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = round_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}

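/*
 * Walk memblock.memory from the highest region down and try to find a
 * free area of 'size' bytes within [start, end).  MEMBLOCK_ALLOC_ACCESSIBLE
 * as 'end' is translated to the current allocation limit.
 */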
static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
			phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
	return memblock_find_base(size, align, start, end);
}

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

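/*
 * Drop entry 'r' from the region array by shifting the following entries
 * down.  An emptied array keeps a single zero-sized filler entry so the
 * rest of the code never has to deal with cnt == 0.
 */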
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
	}
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

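/*
 * Double the capacity of a region array, using kmalloc() once the slab
 * allocator is up and a memblock allocation before that.  Resizing is
 * refused until memblock_analyze() has set memblock_can_resize.
 */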
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

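/*
 * Insert [base, base + size) into a region array, merging it with any
 * existing regions it overlaps or abuts (unless the architecture's
 * memblock_memory_can_coalesce() forbids the merge), and keeping the
 * array sorted by base address.
 */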
static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i, slot = -1;

	/* First try and coalesce this MEMBLOCK with others */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Exit if there are no possible hits */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* Check if we are fully enclosed within an existing
		 * block
		 */
		if (rgn->base <= base && rend >= end)
			return 0;

		/* Check if we overlap or are adjacent with the bottom
		 * of a block.
		 */
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			 * base
			 */
			rgn->base = base;
			rgn->size = rend - base;

			/* Return if we have nothing else to allocate
			 * (fully coalesced)
			 */
			if (rend >= end)
				return 0;

			/* We continue processing from the end of the
			 * coalesced block.
			 */
			base = rend;
			size = end - base;
		}

		/* Now check if we overlap or are adjacent with the
		 * top of a block
		 */
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * freed an entry, we know we won't fail
			 * to allocate one later, so we won't risk
			 * losing the original block allocation.
			 */
			size += (base - rgn->base);
			base = rgn->base;
			memblock_remove_region(type, i--);
		}
	}

	/* If the array is empty, special case, replace the fake
	 * filler region and return
	 */
	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

 new_block:
	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			slot = i + 1;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		slot = 0;
	}
	type->cnt++;

	/* The array is full? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		BUG_ON(slot < 0);
		memblock_remove_region(type, slot);
		return -1;
	}

	return 0;
}

long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

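/*
 * Carve [base, base + size) out of a region array: regions fully covered
 * are dropped, a region that fully encloses the range is split in two,
 * and partial overlaps are trimmed at the top or bottom.
 */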
static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;

	}
	return 0;
}

long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

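/*
 * Typical early boot usage, sketched for illustration only (the real call
 * sites live in architecture setup code, and the variable names below are
 * made up):
 *
 *	memblock_init();
 *	memblock_add(ram_base, ram_size);		// RAM reported by firmware
 *	memblock_reserve(kernel_base, kernel_size);	// protect the kernel image
 *	memblock_analyze();
 *	early_table = __va(memblock_alloc(table_size, PAGE_SIZE));
 */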

/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

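/*
 * Given [start, end), return the end of the chunk that lies on the same
 * node as 'start' and store that node id in *nid.  Without
 * CONFIG_ARCH_POPULATES_NODE_MAP this degenerates to node 0 covering the
 * whole range.
 */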
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by addresses
	 * and returns the nid. This is not very convenient for early_pfn_map[] users
	 * as the map isn't sorted yet, and it really wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the early
	 * map multiple times. Eventually we may want to use an ARCH config option
	 * to implement a completely different method for both cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = round_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    !memblock_add_region(&memblock.reserved, ret, size))
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}


/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

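/*
 * Truncate the memory map so that no more than 'memory_limit' bytes of RAM
 * remain, then clip or drop any reserved regions that now fall above the
 * new end of DRAM.
 */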
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

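/*
 * Binary search the sorted, non-overlapping region array for the region
 * containing 'addr'.  Returns its index, or -1 if the address is not
 * covered.
 */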
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
		    name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

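/*
 * Finish early setup: verify the red-zone markers in the static arrays,
 * compute memblock.memory_size, and allow the region arrays to be resized
 * from this point on.
 */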
void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

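/*
 * One-time setup of the memblock structure: hook up the static region
 * arrays, plant the red-zone markers checked by memblock_analyze(), and
 * install the zero-sized filler entries.
 */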
void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions	= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max	= INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

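/*
 * Debugfs view of the memory and reserved region arrays.  With debugfs
 * mounted at its usual location the files show up as
 * /sys/kernel/debug/memblock/{memory,reserved}.
 */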
static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
