memblock.c revision 10d0643988e976360eb3497dcafb55b393b8e480
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
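
/*
 * Worked example (editor's illustration, not in the original source):
 * with align = 0x1000 (4 KiB),
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 *
 * Both helpers assume the alignment is a power of two, so that
 * ~(size - 1) masks off exactly the low-order offset bits.
 */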

static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
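
/*
 * Editor's note: two half-open ranges [base, base + size) overlap iff
 * each one begins before the other one ends. E.g. [0x1000, 0x3000)
 * and [0x2000, 0x4000) overlap, while [0x1000, 0x2000) and
 * [0x2000, 0x3000) merely touch and are not counted as overlapping.
 */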

static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
			       phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
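
/*
 * Editor's note: the return value encodes ordering: 1 means region 2
 * starts exactly where region 1 ends, -1 means the reverse, and 0 means
 * the regions are not adjacent. For instance,
 * memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) returns 1.
 */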

static long __init_memblock memblock_regions_adjacent(struct memblock_type *type,
				 unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
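
/*
 * Editor's note on the loop above: the first candidate is the aligned
 * block ending at 'end'. Whenever a candidate collides with a reserved
 * region starting at res_base, the search hops down to the highest
 * aligned block that fits below res_base, and so on until a free slot
 * is found or the candidate falls below 'start'.
 */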

static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory.
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}
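
/*
 * Editor's note: memory.regions[] is kept sorted by ascending base
 * address, so while scanning downward, the first region that ends at or
 * below 'start' means no remaining region can intersect [start, end)
 * and the loop can stop early.
 */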

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init_memblock memblock_coalesce_regions(struct memblock_type *type,
		unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it, or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space; we now need to move the array over before we add the
	 * reserved region, since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB, that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available, as later on we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
							     type->regions[i].size,
							     type->regions[i+1].base,
							     type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array,
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* Is the array full? If so, try to resize it. If that fails, we undo
	 * our allocation and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
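
/*
 * Example (editor's illustration): with existing regions
 * [0x0000, 0x1000) and [0x4000, 0x5000), adding base = 0x2000,
 * size = 0x1000 coalesces with neither neighbour, so the sorted-insert
 * loop above shifts [0x4000, 0x5000) up one slot and stores the new
 * range in the gap, keeping the array ordered by base address.
 */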

long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
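
/*
 * Example (editor's illustration): removing [0x2000, 0x3000) from an
 * existing region [0x1000, 0x4000) takes the split path above; the
 * entry shrinks to [0x1000, 0x2000) and a new entry [0x3000, 0x4000)
 * is added for the remainder.
 */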

long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
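
/*
 * Typical usage (editor's sketch; the size and alignment are made up):
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *table = __va(pa);
 *
 * No NULL check is needed: memblock_alloc() goes through
 * memblock_alloc_base(), which panics on failure, and the result lies
 * below memblock.current_limit, so it is intended to be addressable
 * through __va().
 */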

/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but
 * allocation within a memblock region is top-down. XXX I plan to fix that
 * at some stage.
 *
 * WARNING: Only available after early_node_map[] has been populated;
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc, which really wants us to walk by
	 * addresses and return the nid. This is not very convenient for
	 * early_pfn_map[] users, as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below, which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}
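
/*
 * Editor's note: given a span [start, end), memblock_nid_range() sets
 * *nid to the node that owns 'start' and returns the first address (no
 * greater than 'end') at which that node's memory stops, so callers can
 * walk a region one node-sized chunk at a time.
 */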

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
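
/*
 * Example (editor's illustration): with memory regions [0, 1 GiB) and
 * [2 GiB, 3 GiB) and memory_limit = 1.5 GiB, the first region consumes
 * 1 GiB of the budget and the second is truncated to [2 GiB, 2.5 GiB);
 * reserved regions beyond the new end of DRAM are then clipped or
 * dropped as well.
 */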

static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
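
/*
 * Editor's note: this is a standard binary search over the sorted,
 * non-overlapping regions[] array, so the lookups below cost
 * O(log cnt) instead of a linear scan.
 */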

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
		    name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from now on */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hook up the initial arrays */
	memblock.memory.regions	= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max	= INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero-size MEMBLOCK which will get coalesced away
	 * later. This simplifies memblock_add_region(), which can then
	 * assume the array is never empty.
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
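
/*
 * Typical boot-time sequence (editor's sketch; the addresses below are
 * illustrative, not from any real platform):
 *
 *	memblock_init();			    - set up the static arrays
 *	memblock_add(0x00000000, 0x40000000);	    - register 1 GiB of RAM
 *	memblock_reserve(0x01000000, 0x00400000);   - e.g. the kernel image
 *	memblock_analyze();			    - sum memory_size and
 *						      enable array resizing
 *	pa = memblock_alloc(0x1000, 0x1000);	    - now safe to allocate
 */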

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
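
/*
 * Editor's note: booting with "memblock=debug" on the kernel command
 * line sets memblock_debug, which enables the memblock_dbg() output and
 * the memblock_dump_all() report above.
 */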

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && !ARCH_DISCARD_MEMBLOCK */