bootmem.c revision 337998587f802535896e9ed16d19f97915ccd368
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

#ifndef CONFIG_NO_BOOTMEM
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
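
/*
 * Worked example (editor's illustration, not part of the original file):
 * with 4 KiB pages, a node spanning 128 MiB has 32768 pfns to track.
 * bootmap_bytes(32768) yields (32768 + 7) / 8 = 4096 bytes, which is
 * already aligned to sizeof(long), so bootmem_bootmap_pages(32768)
 * returns exactly one page for the bitmap.
 */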

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_min_pfn < ent->node_min_pfn)
			break;
	}
	list_add_tail(&bdata->list, iter);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
#endif
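
/*
 * Example (editor's sketch, not part of the original file): a typical
 * UP setup_arch() sequence.  Here bitmap_pfn, first_free_pfn and
 * ram_bytes are hypothetical values the architecture derives from its
 * memory map:
 *
 *	unsigned long bytes = init_bootmem(bitmap_pfn, max_low_pfn);
 *
 *	free_bootmem(PFN_PHYS(first_free_pfn), ram_bytes);
 *	reserve_bootmem(PFN_PHYS(bitmap_pfn), bytes, BOOTMEM_DEFAULT);
 *
 * Everything starts out reserved, so free_bootmem() registers the
 * usable RAM and reserve_bootmem() then re-protects the bitmap itself.
 */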

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

#ifdef CONFIG_NO_BOOTMEM
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}

unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}
#else
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in chunks of that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
#endif

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
#ifdef CONFIG_NO_BOOTMEM
	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
#else
	return free_all_bootmem_core(pgdat->bdata);
#endif
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
#ifdef CONFIG_NO_BOOTMEM
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some cases node 0 has no RAM installed and the low
	 * memory ends up on node 1 instead.  Passing MAX_NUMNODES makes
	 * sure all ranges in early_node_map[] are used, not only the
	 * ones related to node 0.
	 */
	return free_all_memory_core_early(MAX_NUMNODES);
#else
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
#endif
}
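
/*
 * Example (editor's illustration, not part of the original file): an
 * architecture's mem_init() usually hands everything over in one call:
 *
 *	totalram_pages += free_all_bootmem();
 *
 * After this point the bitmap pages themselves have been released and
 * only free_bootmem_late() may still be used to feed pages to the
 * buddy allocator.
 */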

#ifndef CONFIG_NO_BOOTMEM
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
#endif

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	free_early(physaddr, physaddr + size);
#else
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
#endif
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
	free_early(addr, addr + size);
#else
	unsigned long start, end;

	kmemleak_free_part(__va(addr), size);

	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	mark_bootmem(start, end, 0, 0);
#endif
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
#endif
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
#ifdef CONFIG_NO_BOOTMEM
	panic("no bootmem");
	return 0;
#else
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
#endif
}
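
/*
 * Example (editor's sketch, not part of the original file): protecting
 * a firmware table found at a hypothetical physical range
 * fw_addr/fw_size before any allocation can clobber it:
 *
 *	if (reserve_bootmem(fw_addr, fw_size, BOOTMEM_EXCLUSIVE))
 *		panic("firmware table clashes with an earlier reservation");
 *	...
 *	free_bootmem(fw_addr, fw_size);
 *
 * Note the asymmetric rounding above: reserve_bootmem() widens the
 * range to whole pages (PFN_DOWN/PFN_UP) while free_bootmem() shrinks
 * it (PFN_UP/PFN_DOWN), so partial pages always stay reserved.
 */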

#ifndef CONFIG_NO_BOOTMEM
static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}
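
/*
 * Worked example (editor's illustration, not part of the original
 * file): with node_min_pfn = 5 and step = 4, align_idx(bdata, 1, 4)
 * computes ALIGN(5 + 1, 4) - 5 = 8 - 5 = 3, so the absolute pfn
 * (3 + 5 = 8) is 4-aligned even though the node starts at an odd pfn.
 * Aligning the bare index would have returned 4, i.e. absolute pfn 9,
 * which is not.
 */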

static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}

static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
	{
		bootmem_data_t *p_bdata;

		p_bdata = bootmem_arch_preferred_node(bdata, size, align,
							goal, limit);
		if (p_bdata)
			return alloc_bootmem_core(p_bdata, size, align,
							goal, limit);
	}
#endif
	return NULL;
}
#endif

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
#ifdef CONFIG_NO_BOOTMEM
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
#else
	bootmem_data_t *bdata;
	void *region;

restart:
	region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
	if (region)
		return region;

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_core(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
#endif
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

#ifdef CONFIG_NO_BOOTMEM
	limit = -1UL;
#endif

	return ___alloc_bootmem(size, align, goal, limit);
}
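
/*
 * Example (editor's illustration, not part of the original file): an
 * early hash table allocation, essentially what the alloc_bootmem()
 * convenience macro in <linux/bootmem.h> expands to.  hash_size is a
 * hypothetical byte count:
 *
 *	table = __alloc_bootmem(hash_size, SMP_CACHE_BYTES,
 *				__pa(MAX_DMA_ADDRESS));
 *
 * No NULL check is needed: the region comes back zeroed and this
 * variant panics instead of failing.
 */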

#ifndef CONFIG_NO_BOOTMEM
static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	return ___alloc_bootmem(size, align, goal, limit);
}
#endif

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	return __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
#else
	return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
#endif
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update the goal according to MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
#ifdef CONFIG_NO_BOOTMEM
		ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						new_goal, -1ULL);
#else
		ptr = alloc_bootmem_core(pgdat->bdata, size, align,
						new_goal, 0);
#endif
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}
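
/*
 * Example (editor's sketch, not part of the original file): placing
 * per-node data on the node it describes, with transparent fallback to
 * other nodes if it does not fit.  nid and size are hypothetical:
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *	void *map = __alloc_bootmem_node(pgdat, size, PAGE_SIZE,
 *					 __pa(MAX_DMA_ADDRESS));
 */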

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
#ifdef CONFIG_NO_BOOTMEM
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
#else
	bootmem_data_t *bdata;
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];

	return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
#endif
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
#else
	ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
#endif
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

#ifdef CONFIG_NO_BOOTMEM
	return __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, ARCH_LOW_ADDRESS_LIMIT);
#else
	return ___alloc_bootmem_node(pgdat->bdata, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
#endif
}
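
/*
 * Example (editor's illustration, not part of the original file):
 * allocating a buffer that a 32-bit-DMA-only device can address,
 * similar to what the alloc_bootmem_low() convenience macro does.
 * buf_size is a hypothetical byte count:
 *
 *	void *buf = __alloc_bootmem_low(buf_size, SMP_CACHE_BYTES, 0);
 *
 * ARCH_LOW_ADDRESS_LIMIT caps the search at the 4 GiB boundary unless
 * the architecture overrides it.
 */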