/* init.c, revision 1c395176962176660bb108f90e97e1686cfe0d85 */
/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

#include "migrate.h"

#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))

#ifndef __tilegx__
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);
#endif

/* Create an L2 page table */
static pte_t * __init alloc_pte(void)
{
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

/*
 * L2 page tables per controller.  We allocate these all at once from
 * the bootmem allocator and store them here.  This saves on kernel L2
 * page table memory, compared to allocating a full 64K page per L2
 * page table, and also means that in cases where we use huge pages,
 * we are guaranteed to later be able to shatter those huge pages and
 * switch to using these page tables instead, without requiring
 * further allocation.  Each l2_ptes[] entry points to the first page
 * table for the first hugepage-size piece of memory on the
 * controller; other page tables are just indexed directly, i.e. the
 * L2 page tables are contiguous in memory for each controller.
 */
static pte_t *l2_ptes[MAX_NUMNODES];
static int num_l2_ptes[MAX_NUMNODES];

static void init_prealloc_ptes(int node, int pages)
{
	BUG_ON(pages & (HV_L2_ENTRIES-1));
	if (pages) {
		num_l2_ptes[node] = pages;
		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
						HV_PAGE_TABLE_ALIGN, 0);
	}
}

pte_t *get_prealloc_pte(unsigned long pfn)
{
	int node = pfn_to_nid(pfn);
	pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
	BUG_ON(node >= MAX_NUMNODES);
	BUG_ON(pfn >= num_l2_ptes[node]);
	return &l2_ptes[node][pfn];
}
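
/*
 * Illustrative example of the index math above (the shift values are
 * assumptions, not taken from the headers): with 64KB pages
 * (PAGE_SHIFT of 16) and NR_PA_HIGHBIT_SHIFT at bit 36, a pfn of
 * 0x100123 lies on node 1, and masking keeps the low 20 bits, yielding
 * index 0x123: the PTE slot for that pfn within the node's contiguous
 * block of preallocated L2 tables.
 */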

/*
 * What caching do we expect pages from the heap to have when
 * they are allocated during bootup?  (Once we've installed the
 * "real" swapper_pg_dir.)
 */
static int initial_heap_home(void)
{
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default)
		return PAGE_HOME_HASH;
#endif
	return smp_processor_id();
}

/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t pa = __pa(page_table);
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}
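
/*
 * Encoding example (illustrative; a HV_LOG2_PAGE_TABLE_ALIGN of 11,
 * i.e. 2KB alignment, is an assumption): an L2 table at PA 0x40000800
 * has ptfn 0x80001, which hv_pte_set_ptfn() packs into the pmd
 * alongside the _PAGE_TABLE bits and the chosen cache home.
 */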

#ifdef __tilegx__

#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif

/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
static inline pmd_t *alloc_pmd(void)
{
	return (pmd_t *)alloc_pte();
}

static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
	assign_pte((pmd_t *)pud, (pte_t *)pmd);
}

#endif /* __tilegx__ */

/* Replace the given pmd with a full PTE table. */
void __init shatter_pmd(pmd_t *pmd)
{
	pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
	assign_pte(pmd, pte);
}

#ifdef CONFIG_HIGHMEM
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
		vaddr += PMD_SIZE;
	}
}
#endif /* CONFIG_HIGHMEM */


#if CHIP_HAS_CBOX_HOME_MAP()

static int __initdata ktext_hash = 1;  /* .text pages */
static int __initdata kdata_hash = 1;  /* .data and .bss pages */
int __write_once hash_default = 1;     /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1;      /* if no homecaching, use hash-for-home */
#endif /* CHIP_HAS_CBOX_HOME_MAP */

/*
 * CPUs to use for striping the pages of kernel data.  If hash-for-home
 * is available, this is only relevant if kcache_hash sets up the
 * .data and .bss to be page-homed, and we don't want the default mode
 * of using the full set of kernel cpus for the striping.
 */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge;       /* if no homecaching, small pages */


/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	prot = pte_set_home(prot, home);
#if CHIP_HAS_CBOX_HOME_MAP()
	if (home == PAGE_HOME_IMMUTABLE) {
		if (ktext_hash)
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
		else
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
	}
#endif
	return prot;
}

/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

#if CHIP_HAS_CBOX_HOME_MAP()
	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/* We map the aliased pages of permanent text inaccessible. */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return PAGE_NONE;

	/*
	 * We map read-only data non-coherent for performance.  We could
	 * use neighborhood caching on TILE64, but it's not clear it's a win.
	 */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

	/* As a performance optimization, keep the boot init stack here. */
	if (address >= (ulong)&init_thread_union &&
	    address < (ulong)&init_thread_union + THREAD_SIZE)
		return construct_pgprot(PAGE_KERNEL, smp_processor_id());

#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu).  This
	 * includes any addresses after the loaded image and any address before
	 * _einitdata, since we already captured the case of text before
	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) _einitdata)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Make the w1data homed like heap to start with, to avoid
	 * making it part of the page-striped data area when we're just
	 * going to convert it to read-only soon anyway.
	 */
	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/*
	 * Otherwise we just hand out consecutive cpus.  To avoid
	 * requiring this function to hold state, we just walk forward from
	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
	 * the requested address, while walking cpu home around kdata_mask.
	 * This is typically no more than a dozen or so iterations.
	 */
	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
		if (page == (ulong)atomic_locks)
			continue;
#endif
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	return construct_pgprot(PAGE_KERNEL, cpu);
}
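
/*
 * Striping example (illustrative): with kdata_mask covering cpus 0-3,
 * the first page after __w1data_end is homed on cpu 0, the next on
 * cpu 1, and so on, wrapping back to cpu 0 after cpu 3.  Pages
 * special-cased above (the boot stack, empty_zero_page, atomic_locks)
 * are skipped without consuming a slot in the rotation.
 */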

/*
 * This function sets up how we cache the kernel text.  If we have
 * hash-for-home support, normally that is used instead (see the
 * kcache_hash boot flag for more information).  But if we end up
 * using a page-based caching technique, this option sets up the
 * details of that.  In addition, the "ktext=nocache" option may
 * always be used to disable local caching of text pages, if desired.
 */

static int __initdata ktext_arg_seen;
static int __initdata ktext_small;
static int __initdata ktext_local;
static int __initdata ktext_all;
static int __initdata ktext_nondataplane;
static int __initdata ktext_nocache;
static struct cpumask __initdata ktext_mask;

static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting on Tile64: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}

	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		char buf[NR_CPUS * 5];
		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %s "
				"with small pages\n", buf);
		} else {
			pr_info("ktext: caching on cpu %s with one huge page\n",
				buf);
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}

early_param("ktext", setup_ktext);
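
/*
 * Boot-argument examples for the parser above (illustrative):
 *   ktext=huge           one huge locally cached page (the default)
 *   ktext=local          small pages, cached only on the local tile
 *   ktext=all            small pages, cached across all cpus
 *   ktext=0-3            small pages, cached on cpus 0-3; a single-cpu
 *                        list keeps one huge page homed on that cpu
 *   ktext=nocache,local  a leading "nocache" disables local caching
 *                        and may be combined with the options above
 */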

static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	if (!ktext_nocache)
		prot = hv_pte_set_nc(prot);
#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	else
		prot = hv_pte_set_no_alloc_l2(prot);
#endif
	return prot;
}

#ifndef __tilegx__
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#else
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
	if (pud_none(*pud))
		assign_pmd(pud, alloc_pmd());
	return pmd_offset(pud, va);
}
#endif

/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
	__attribute__((aligned(HV_PAGE_TABLE_ALIGN)));

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_INTRPT),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shootdown for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			   " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pfn = 0;  /* code starts at PA 0 */
		pte = alloc_pte();
		for (pte_ofs = 0; address < (unsigned long)_einittext;
		     pfn++, pte_ofs++, address += PAGE_SIZE) {
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		*(pte_t *)pmd = pteval;
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}
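
/*
 * Summary of the text-mapping cases above (illustrative, not from the
 * original source):
 *   ktext_small && ktext_local    small pages, local cache only
 *                                 (uncached if ktext_nocache)
 *   ktext_small && !ktext_local   small pages, homes striped
 *                                 round-robin across ktext_mask
 *   huge page + ktext_hash        hash-for-home
 *   huge page + single-cpu mask   homed on that one cpu
 *   huge page otherwise           uncached, or cached with no L3 home
 */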

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * On Tile, the only valid things for which we can just hand out unchecked
 * PTEs are the kernel code and data.  Anything else might change its
 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
 * Note that init_thread_union is released to heap soon after boot,
 * so we include it in the init data.
 *
 * For TILE-Gx, we might want to consider allowing access to PA
 * regions corresponding to PCI space, etc.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	return pagenr < kaddr_to_pfn(_end) &&
		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
		  pagenr < kaddr_to_pfn(_einitdata)) &&
		!(pagenr >= kaddr_to_pfn(_sinittext) ||
		  pagenr <= kaddr_to_pfn(_einittext-1));
}

#ifdef CONFIG_HIGHMEM
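/*
 * Set up the page table backing the kmap() region: page_table_range_init()
 * guarantees the L2 table covering PKMAP_BASE exists, and we then record
 * its kernel VA in pkmap_page_table for the highmem kmap code.
 */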
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */


static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER-1;
		count = 1 << order;
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		init_page_count(page);
		__free_pages(page, order);
		totalram_pages += count;

		page += count;
		pfn += count;
	}
}
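
/*
 * Batching example (illustrative): freeing pfns 3 through 64 proceeds
 * as order 0 at pfn 3, order 2 at 4, order 3 at 8, order 4 at 16,
 * order 5 at 32, then order 0 at 64 -- __ffs() keeps each block
 * naturally aligned, and the while loop above trims any block that
 * would overrun 'end'.
 */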

static void __init set_non_bootmem_pages_init(void)
{
	struct zone *z;
	for_each_zone(z) {
		unsigned long start, end;
		int nid = z->zone_pgdat->node_id;
		int idx = zone_idx(z);

		start = z->zone_start_pfn;
		if (start == 0)
			continue;  /* bootmem */
		end = start + z->spanned_pages;
		if (idx == ZONE_NORMAL) {
			BUG_ON(start != node_start_pfn[nid]);
			start = node_free_pfn[nid];
		}
#ifdef CONFIG_HIGHMEM
		if (idx == ZONE_HIGHMEM)
			totalhigh_pages += z->spanned_pages;
#endif
		if (kdata_huge) {
			unsigned long percpu_pfn = node_percpu_pfn[nid];
			if (start < percpu_pfn && end > percpu_pfn)
				end = percpu_pfn;
		}
#ifdef CONFIG_PCI
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			if (end > pci_reserve_end_pfn)
				init_free_pfn_range(pci_reserve_end_pfn, end);
			end = pci_reserve_start_pfn;
		}
#endif
		init_free_pfn_range(start, end);
	}
}

/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long vaddr, end;
#endif
#ifdef __tilegx__
	pud_t *pud;
#endif
	pgd_t *pgd_base = swapper_pg_dir;

	kernel_physical_mapping_init(pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
	/*
	 * Since GX allocates just one pmd_t array worth of vmalloc space,
	 * we go ahead and allocate it statically here, then share it
	 * globally.  As a result we don't have to worry about any task
	 * changing init_mm once we get up and running, and there's no
	 * need for e.g. vmalloc_sync_all().
	 */
	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
	assign_pmd(pud, alloc_pmd());
#endif
}


/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
	max_mapnr = max_low_pfn;
#endif
}

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap"
		       " - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
		       FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	totalram_pages += free_all_bootmem();

	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();

	codesize =  (unsigned long)&_etext - (unsigned long)&_text;
	datasize =  (unsigned long)&_end - (unsigned long)&_sdata;
	initsize =  (unsigned long)&_einittext - (unsigned long)&_sinittext;
	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;

	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
#ifdef CONFIG_HUGEVMAP
	printk(KERN_DEBUG "  HUGEMAP %#lx - %#lx\n",
	       HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
#endif
	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	last = high_memory;
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last-1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}

/*
 * This is for the non-NUMA, single-node SMP system case.
 * Specifically, as on x86, we always add the new memory to the
 * highest zone for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif

struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
/*
 * The __w1data area holds data that is only written during initialization,
 * and is read-only and thus freely cacheable thereafter.  Fix the page
 * table entries that cover that region accordingly.
 */
static void mark_w1data_ro(void)
{
	/* Loop over page table entries */
	unsigned long addr = (unsigned long)__w1data_begin;
	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
		unsigned long pfn = kaddr_to_pfn((void *)addr);
		pte_t *ptep = virt_to_pte(NULL, addr);
		BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
	}
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
static long __write_once initfree;
#else
static long __write_once initfree = 1;
#endif

/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) == 0) {
		initfree = val;
		pr_info("initfree: %s free init pages\n",
			initfree ? "will" : "won't");
	}
	return 1;
}
__setup("initfree=", set_initfree);
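
/*
 * Example (illustrative): booting with "initfree=0" keeps the __init
 * pages around but unmapped, so a stray reference faults instead of
 * silently reading freed memory; "initfree=1" (the default unless
 * CONFIG_DEBUG_PAGEALLOC is set) returns them to the page allocator.
 */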

static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		__ClearPageReserved(page);
		init_page_count(page);
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET;

	/*
	 * Evict the dirty initdata on the boot cpu, evict the w1data
	 * wherever it's homed, and evict all the init code everywhere.
	 * We are guaranteed that no one will touch the init pages any
	 * more, and although other cpus may be touching the w1data,
	 * we only actually change the caching on tile64, which won't
	 * be keeping local copies in the other tiles' caches anyway.
	 */
	homecache_evict(&cpu_cacheable_map);

	/* Free the data pages that we won't use again after init. */
	free_init_pages("unused kernel data",
			(unsigned long)_sinitdata,
			(unsigned long)_einitdata);

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from MEM_SV_INTRPT that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
			(unsigned long)_einittext - text_delta);

#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
	/*
	 * Upgrade the .w1data section to globally cached.
	 * We don't do this on tilepro, since the cache architecture
	 * pretty much makes it irrelevant, and in any case we end
	 * up having racing issues with other tiles that may touch
	 * the data after we flush the cache but before we update
	 * the PTEs and flush the TLBs, causing sharer shootdowns
	 * later.  Even though this is to clean data, it seems like
	 * an unnecessary complication.
	 */
	mark_w1data_ro();
#endif

	/* Do a global TLB flush so everyone sees the changes. */
	flush_tlb_all();
}