init_64.c revision 1f067167a83d1c7f80437fd1d32b55508aaca009
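Memory initialization for 64-bit x86: builds the kernel direct mapping of physical memory, sets up bootmem and the page-allocator zones, and provides the memory hotplug, /dev/mem policy, kernel-address validity and sparsemem-vmemmap helpers.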

/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages __meminitdata
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/*
			 * This loop can take a while with 256 GB and
			 * 4k pages so defer the NMI watchdog:
			 */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();

			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;

			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;

static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

void
set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) && pte_val(new_pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void
set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

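/*
 * set_pte_vaddr() is the low-level helper used to install single kernel
 * mappings such as fixmap entries (note the "PGD FIXMAP MISSING" check
 * above); intermediate PMD/PTE tables are allocated on demand through
 * spp_getpage().
 */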

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end. _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;
	pmd_t *last_pmd = pmd + PTRS_PER_PMD;

	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

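/*
 * The phys_{pte,pmd,pud}_init() helpers below each fill in one page-table
 * level of the direct mapping for [addr, end).  They return the highest
 * address actually mapped (last_map_addr) and use 2M/1G mappings when
 * page_size_mask allows it, falling back to the next smaller page size
 * otherwise.
 */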
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
{
	unsigned pages = 0;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

		if (addr >= end) {
			if (!after_bootmem) {
				for(; i < PTRS_PER_PTE; i++, pte++)
					set_pte(pte, __pte(0));
			}
			break;
		}

		if (pte_val(*pte))
			continue;

		if (0)
			printk(" pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
		pages++;
	}
	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	return phys_pte_init(pte, address, end);
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long pte_phys;
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd))
				last_map_addr = phys_pte_update(pmd, address,
								end);
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
			continue;
		}

		pte = alloc_low_page(&pte_phys);
		last_map_addr = phys_pte_init(pte, address, end);
		unmap_low_page(pte);

		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
		unsigned long page_size_mask)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long last_map_addr;

	spin_lock(&init_mm.page_table_lock);
	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
	return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem &&
		    !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud))
				last_map_addr = phys_pmd_update(pud, addr, end,
								page_size_mask);
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);

		spin_lock(&init_mm.page_table_lock);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
		unmap_low_page(pmd);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);

	}
	__flush_tlb_all();
	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
		unsigned long page_size_mask)
{
	pud_t *pud;

	pud = (pud_t *)pgd_page_vaddr(*pgd);

	return phys_pud_init(pud, addr, end, page_size_mask);
}

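/*
 * Estimate the worst-case number of PUD/PMD/PTE pages needed to map
 * memory up to 'end' and reserve a physically contiguous region for them
 * in the e820 map, starting the search at 32KB (0x8000).
 */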
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
	if (direct_gbpages) {
		unsigned long extra;
		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (cpu_has_pse) {
		unsigned long extra;
		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
}

static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}

static unsigned long __init kernel_physical_mapping_init(unsigned long start,
						unsigned long end,
						unsigned long page_size_mask)
{

	unsigned long next, last_map_addr = end;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		next = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (next > end)
			next = end;

		if (pgd_val(*pgd)) {
			last_map_addr = phys_pud_update(pgd, __pa(start),
						__pa(end), page_size_mask);
			continue;
		}

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						page_size_mask);
		unmap_low_page(pud);
		pgd_populate(&init_mm, pgd_offset_k(start),
			     __va(pud_phys));
	}

	return last_map_addr;
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#define NR_RANGE_MR 5

static int save_mr(struct map_range *mr, int nr_range,
		   unsigned long start_pfn, unsigned long end_pfn,
		   unsigned long page_size_mask)
{

	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

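/*
 * init_memory_mapping() below splits [start, end) into up to NR_RANGE_MR
 * ranges keyed by the largest usable page size.  A purely illustrative
 * example: with gbpages and PSE available, mapping 0 - 0x104201000 would
 * be split into
 *
 *	0x000000000 - 0x100000000	1G pages
 *	0x100000000 - 0x104200000	2M pages
 *	0x104200000 - 0x104201000	4k pages
 *
 * with empty head/tail ranges dropped by save_mr() and adjacent ranges of
 * equal page size merged afterwards.
 */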

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long last_map_addr = 0;
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;

	printk(KERN_INFO "init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		init_gbpages();

	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head if not big page alignment ?*/
	start_pfn = start >> PAGE_SHIFT;
	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* big page (2M) range*/
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
			<< (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* big page (1G) range */
	start_pfn = end_pfn;
	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask &
			((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));

	/* tail is not big page (1G) alignment */
	start_pfn = end_pfn;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* tail is not big page (2M) alignment */
	start_pfn = end_pfn;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* try to merge same page size and continuous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof (struct map_range));
		mr[i].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
			mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	if (!after_bootmem)
		find_early_table_space(end);

	for (i = 0; i < nr_range; i++)
		last_map_addr = kernel_physical_mapping_init(
					mr[i].start, mr[i].end,
					mr[i].page_size_mask);

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();

	if (!after_bootmem && table_end > table_start)
		reserve_early(table_start << PAGE_SHIFT,
				table_end << PAGE_SHIFT, "PGTABLE");

	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
			last_map_addr, end);

	if (!after_bootmem)
		early_memtest(start, end);

	return last_map_addr >> PAGE_SHIFT;
}

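/*
 * Non-NUMA boot-time memory setup: everything goes to node 0.  A bootmem
 * bitmap is carved out of the e820 map, all active RAM regions are
 * registered and freed into bootmem, and early reservations are replayed
 * before the bitmap itself is reserved.
 */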
#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 0, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	memory_present(0, 0, max_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	last_mapped_pfn = init_memory_mapping(start, start + size-1);
	if (last_mapped_pfn > max_pfn_mapped)
		max_pfn_mapped = last_mapped_pfn;

	ret = __add_pages(zone, start_pfn, nr_pages);
	WARN_ON(1);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}


static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear_bss() already clear the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = max_pfn - totalram_pages -
					absent_pages_in_range(0, max_pfn);
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	cpa_init();
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

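/*
 * With CONFIG_DEBUG_RODATA the kernel read-only data (and, unless dynamic
 * ftrace is enabled, the kernel text) is write-protected after init, and
 * rodata is additionally mapped non-executable; the CPA_DEBUG self-tests
 * below exercise the attribute changes.
 */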
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	start = rodata_start;
#endif

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
#ifdef CONFIG_NUMA
	int nid, next_nid;
	int ret;
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= max_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables:
		 */
		if (pfn < max_pfn_mapped)
			return -EFAULT;

		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
				phys, len);
		return -EFAULT;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	nid = phys_to_nid(phys);
	next_nid = phys_to_nid(phys + len - 1);
	if (nid == next_nid)
		ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
	else
		ret = reserve_bootmem(phys, len, flags);

	if (ret != 0)
		return ret;

#else
	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}

	return 0;
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}

	}
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif