bootmem.c revision 5a982cbc7b3fe6cf72266f319286f29963c71b9e
/*
 *  linux/mm/bootmem.c
 *
 *  Copyright (C) 1999 Ingo Molnar
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *
 *  simple boot-time physical memory area allocator and
 *  free memory collector. It's used to deal with reserved
 *  system memory and memory holes as well.
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

/*
 * Access to this subsystem has to be serialized externally. (this is
 * true for the boot process anyway)
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long mapsize;

	mapsize = (pages+7)/8;
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}
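/*
 * Editorial worked example, not in the original source: with 4 KiB pages
 * (PAGE_SHIFT == 12), a node of 131072 pages (512 MiB) needs
 * (131072 + 7) / 8 = 16384 bytes of bitmap, which is already page
 * aligned, so bootmem_bootmap_pages(131072) returns 16384 >> 12 = 4 pages.
 */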
/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	if (list_empty(&bdata_list)) {
		list_add(&bdata->list, &bdata_list);
		return;
	}
	/* insert in order */
	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_boot_start < ent->node_boot_start) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Given an initialised bdata, it returns the size of the boot bitmap
 */
static unsigned long __init get_mapsize(bootmem_data_t *bdata)
{
	unsigned long mapsize;
	unsigned long start = PFN_DOWN(bdata->node_boot_start);
	unsigned long end = bdata->node_low_pfn;

	mapsize = ((end - start) + 7) / 8;
	return ALIGN(mapsize, sizeof(long));
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long mapsize;

	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = get_mapsize(bdata);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}

/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init reserve_bootmem_core(bootmem_data_t *bdata,
	unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;
	int ret;

	/*
	 * round up, partially reserved pages are considered
	 * fully reserved.
	 */
	BUG_ON(!size);
	BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
	BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
	BUG_ON(addr < bdata->node_boot_start);

	sidx = PFN_DOWN(addr - bdata->node_boot_start);
	eidx = PFN_UP(addr + size - bdata->node_boot_start);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
			printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
			if (flags & BOOTMEM_EXCLUSIVE) {
				ret = -EBUSY;
				goto err;
			}
		}

	return 0;

err:
	/* unreserve memory we accidentally reserved */
	for (i--; i >= sidx; i--)
		clear_bit(i, bdata->node_bootmem_map);

	return ret;
}

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out range */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */

	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round up to index to the range.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
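/*
 * Editorial note, not in the original source: reserve and free round in
 * opposite directions.  Assuming 4 KiB pages and node_boot_start == 0,
 * reserve_bootmem_core(bdata, 0x1800, 0x1000, 0) marks pages 1 and 2 as
 * reserved because a partially covered page counts as fully reserved,
 * whereas free_bootmem_core(bdata, 0x1800, 0x1000) frees nothing at all,
 * since sidx = PFN_UP(0x1800) = 2 equals eidx = PFN_DOWN(0x2800) = 2.
 */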
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long offset, remaining_size, areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;

	if (!size) {
		printk("__alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	if (limit && bdata->node_boot_start >= limit)
		return NULL;

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	eidx = end_pfn - PFN_DOWN(bdata->node_boot_start);
	offset = 0;
	if (align && (bdata->node_boot_start & (align - 1UL)) != 0)
		offset = align - (bdata->node_boot_start & (align - 1UL));
	offset = PFN_DOWN(offset);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	if (goal && goal >= bdata->node_boot_start && PFN_DOWN(goal) < end_pfn) {
		preferred = goal - bdata->node_boot_start;

		if (bdata->last_success >= preferred)
			if (!limit || (limit && limit > bdata->last_success))
				preferred = bdata->last_success;
	} else
		preferred = 0;

	preferred = PFN_DOWN(ALIGN(preferred, align)) + offset;
	areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx; i += incr) {
		unsigned long j;
		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, bdata->node_bootmem_map))
			continue;
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, bdata->node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
	}

	if (preferred > offset) {
		preferred = offset;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start);
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset +
					   bdata->node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset +
					   bdata->node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
	}

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
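/*
 * Editorial example of the merge behaviour, not in the original source,
 * assuming 4 KiB pages and an otherwise empty node: a first call
 * __alloc_bootmem_core(bdata, 1024, 64, 0, 0) takes page 0 and records
 * last_pos = 0, last_offset = 1024.  A second identical call scans to
 * start = 1, sees last_pos + 1 == start with align < PAGE_SIZE, and so
 * returns page 0 at offset ALIGN(1024, 64) = 1024 instead of consuming
 * a new page, leaving last_offset = 2048 and page 1 still free.
 */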
static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
{
	struct page *page;
	unsigned long pfn;
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long i, count, total = 0;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	pfn = PFN_DOWN(bdata->node_boot_start);
	idx = bdata->node_low_pfn - pfn;
	map = bdata->node_bootmem_map;
	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
	if (bdata->node_boot_start == 0 ||
	    ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
		gofast = 1;
	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (gofast && v == ~0UL) {
			int order;

			page = pfn_to_page(pfn);
			count += BITS_PER_LONG;
			order = ffs(BITS_PER_LONG) - 1;
			__free_pages_bootmem(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;

			page = pfn_to_page(pfn);
			for (m = 1; m && i < idx; m<<=1, page++, i++) {
				if (v & m) {
					count++;
					__free_pages_bootmem(page, 0);
				}
			}
		} else {
			i += BITS_PER_LONG;
		}
		pfn += BITS_PER_LONG;
	}
	total += count;

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	count = 0;
	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
	for (i = 0; i < idx; i++, page++) {
		__free_pages_bootmem(page, 0);
		count++;
	}
	total += count;
	bdata->node_bootmem_map = NULL;

	return total;
}

unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat, freepfn, startpfn, endpfn);
}

void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
}

void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	return free_all_bootmem_core(pgdat);
}

unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0), start, 0, pages);
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	return reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size, flags);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;
	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}

unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0));
}
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
				      unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = __alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}

void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size,align,goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = __alloc_bootmem_core(bdata, size, align, goal,
					   ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}

void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_core(pgdat->bdata, size, align, goal,
				    ARCH_LOW_ADDRESS_LIMIT);
}
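/*
 * Editorial usage sketch, not part of bootmem.c: roughly how an
 * architecture's early setup code drives the interfaces above on a
 * simple single-node machine.  The setup_bootmem_example() wrapper, the
 * _end linker-symbol declaration and the flat "RAM runs from the end of
 * the kernel image to max_pfn" layout are simplifying assumptions; real
 * callers live in each architecture's setup_arch() and also walk the
 * firmware memory map, reserve the initrd, and so on.
 */
extern char _end[];	/* end of the kernel image, from the linker script */

static void __init setup_bootmem_example(void)
{
	unsigned long bitmap_pfn = PFN_UP(__pa(_end));
	unsigned long bootmap_size;

	/* Put the bootmem bitmap right after the kernel; all pages start reserved. */
	bootmap_size = init_bootmem(bitmap_pfn, max_pfn);

	/* Declare the usable RAM, then re-reserve the bitmap itself. */
	free_bootmem(PFN_PHYS(bitmap_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(bitmap_pfn));
	reserve_bootmem(PFN_PHYS(bitmap_pfn), bootmap_size, BOOTMEM_DEFAULT);

	/* Boot-time allocations now come straight out of the bitmap. */
	(void)__alloc_bootmem(65536, PAGE_SIZE, 0);

	/*
	 * Later, mem_init() calls free_all_bootmem() to hand every page
	 * that is still unreserved (plus the bitmap) to the page allocator.
	 */
}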