init.c revision 00dce03134689a257120ae2aa18ba7d1a736bef7
/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

#include "migrate.h"

/*
 * We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)"
 * in the Tile Kconfig, but this generates configure warnings.
 * Do it here and force people to get it right to compile this file.
 * The problem is that with 4KB small pages and 16MB huge pages,
 * the default value doesn't allow us to group enough small pages
 * together to make up a huge page.
 */
#if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1
# error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size"
#endif

#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))

#ifndef __tilegx__
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);
#endif

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Create an L2 page table */
static pte_t * __init alloc_pte(void)
{
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

/*
 * L2 page tables per controller. We allocate these all at once from
 * the bootmem allocator and store them here. This saves on kernel L2
 * page table memory, compared to allocating a full 64K page per L2
 * page table, and also means that in cases where we use huge pages,
 * we are guaranteed to later be able to shatter those huge pages and
 * switch to using these page tables instead, without requiring
 * further allocation. Each l2_ptes[] entry points to the first page
 * table for the first hugepage-size piece of memory on the
 * controller; other page tables are just indexed directly, i.e. the
 * L2 page tables are contiguous in memory for each controller.
 */
static pte_t *l2_ptes[MAX_NUMNODES];
static int num_l2_ptes[MAX_NUMNODES];

static void init_prealloc_ptes(int node, int pages)
{
	BUG_ON(pages & (HV_L2_ENTRIES-1));
	if (pages) {
		num_l2_ptes[node] = pages;
		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
						HV_PAGE_TABLE_ALIGN, 0);
	}
}

pte_t *get_prealloc_pte(unsigned long pfn)
{
	int node = pfn_to_nid(pfn);
	pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
	BUG_ON(node >= MAX_NUMNODES);
	BUG_ON(pfn >= num_l2_ptes[node]);
	return &l2_ptes[node][pfn];
}

/*
 * What caching do we expect pages from the heap to have when
 * they are allocated during bootup? (Once we've installed the
 * "real" swapper_pg_dir.)
 */
static int initial_heap_home(void)
{
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default)
		return PAGE_HOME_HASH;
#endif
	return smp_processor_id();
}

/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t pa = __pa(page_table);
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}

#ifdef __tilegx__

#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif

/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
static inline pmd_t *alloc_pmd(void)
{
	return (pmd_t *)alloc_pte();
}

static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
	assign_pte((pmd_t *)pud, (pte_t *)pmd);
}

#endif /* __tilegx__ */

/* Replace the given pmd with a full PTE table. */
void __init shatter_pmd(pmd_t *pmd)
{
	pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
	assign_pte(pmd, pte);
}

#ifdef CONFIG_HIGHMEM
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
		vaddr += PMD_SIZE;
	}
}
#endif /* CONFIG_HIGHMEM */


#if CHIP_HAS_CBOX_HOME_MAP()

static int __initdata ktext_hash = 1;  /* .text pages */
static int __initdata kdata_hash = 1;  /* .data and .bss pages */
int __write_once hash_default = 1;     /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */
#endif /* CHIP_HAS_CBOX_HOME_MAP */

/*
 * CPUs to use for striping the pages of kernel data.
 * If hash-for-home is available, this is only relevant if kcache_hash
 * sets up the .data and .bss to be page-homed, and we don't want the
 * default mode of using the full set of kernel cpus for the striping.
 */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge;       /* if no homecaching, small pages */


/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	prot = pte_set_home(prot, home);
#if CHIP_HAS_CBOX_HOME_MAP()
	if (home == PAGE_HOME_IMMUTABLE) {
		if (ktext_hash)
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
		else
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
	}
#endif
	return prot;
}

/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

#if CHIP_HAS_CBOX_HOME_MAP()
	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/* We map the aliased pages of permanent text inaccessible. */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return PAGE_NONE;

	/*
	 * We map read-only data non-coherent for performance. We could
	 * use neighborhood caching on TILE64, but it's not clear it's a win.
	 */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

	/* As a performance optimization, keep the boot init stack here. */
	if (address >= (ulong)&init_thread_union &&
	    address < (ulong)&init_thread_union + THREAD_SIZE)
		return construct_pgprot(PAGE_KERNEL, smp_processor_id());

#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu). This
	 * includes any addresses after the loaded image and any address before
	 * _einitdata, since we already captured the case of text before
	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) _einitdata)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Make the w1data homed like heap to start with, to avoid
	 * making it part of the page-striped data area when we're just
	 * going to convert it to read-only soon anyway.
	 */
	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/*
	 * Otherwise we just hand out consecutive cpus. To avoid
	 * requiring this function to hold state, we just walk forward from
	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
	 * the requested address, while walking cpu home around kdata_mask.
	 * This is typically no more than a dozen or so iterations.
	 */
	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
		if (page == (ulong)atomic_locks)
			continue;
#endif
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	return construct_pgprot(PAGE_KERNEL, cpu);
}

/*
 * This function sets up how we cache the kernel text. If we have
 * hash-for-home support, normally that is used instead (see the
 * kcache_hash boot flag for more information). But if we end up
 * using a page-based caching technique, this option sets up the
 * details of that. In addition, the "ktext=nocache" option may
 * always be used to disable local caching of text pages, if desired.
 */

static int __initdata ktext_arg_seen;
static int __initdata ktext_small;
static int __initdata ktext_local;
static int __initdata ktext_all;
static int __initdata ktext_nondataplane;
static int __initdata ktext_nocache;
static struct cpumask __initdata ktext_mask;

static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting on Tile64: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}


	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		char buf[NR_CPUS * 5];
		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %s "
				"with small pages\n", buf);
		} else {
			pr_info("ktext: caching on cpu %s with one huge page\n",
				buf);
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}

early_param("ktext", setup_ktext);


static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	if (!ktext_nocache)
		prot = hv_pte_set_nc(prot);
#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	else
		prot = hv_pte_set_no_alloc_l2(prot);
#endif
	return prot;
}

#ifndef __tilegx__
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#else
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
	if (pud_none(*pud))
		assign_pmd(pud, alloc_pmd());
	return pmd_offset(pud, va);
}
#endif

/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
	__attribute__((aligned(HV_PAGE_TABLE_ALIGN)));

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			   " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_mask now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pfn = 0;  /* code starts at PA 0 */
		pte = alloc_pte();
		for (pte_ofs = 0; address < (unsigned long)_einittext;
		     pfn++, pte_ofs++, address += PAGE_SIZE) {
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
					HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
					HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
					HV_PTE_MODE_CACHE_NO_L3);
		*(pte_t *)pmd = pteval;
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode. When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally. At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain. So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On Tile, the only valid things for which we can just hand out unchecked
 * PTEs are the kernel code and data. Anything else might change its
 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
 * Note that init_thread_union is released to heap soon after boot,
 * so we include it in the init data.
 *
 * For TILE-Gx, we might want to consider allowing access to PA
 * regions corresponding to PCI space, etc.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	return pagenr < kaddr_to_pfn(_end) &&
		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
		  pagenr < kaddr_to_pfn(_einitdata)) &&
		!(pagenr >= kaddr_to_pfn(_sinittext) ||
		  pagenr <= kaddr_to_pfn(_einittext-1));
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */


static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER-1;
		count = 1 << order;
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		init_page_count(page);
		__free_pages(page, order);
		totalram_pages += count;

		page += count;
		pfn += count;
	}
}

static void __init set_non_bootmem_pages_init(void)
{
	struct zone *z;
	for_each_zone(z) {
		unsigned long start, end;
		int nid = z->zone_pgdat->node_id;
		int idx = zone_idx(z);

		start = z->zone_start_pfn;
		if (start == 0)
			continue;  /* bootmem */
		end = start + z->spanned_pages;
		if (idx == ZONE_NORMAL) {
			BUG_ON(start != node_start_pfn[nid]);
			start = node_free_pfn[nid];
		}
#ifdef CONFIG_HIGHMEM
		if (idx == ZONE_HIGHMEM)
			totalhigh_pages += z->spanned_pages;
#endif
		if (kdata_huge) {
			unsigned long percpu_pfn = node_percpu_pfn[nid];
			if (start < percpu_pfn && end > percpu_pfn)
				end = percpu_pfn;
		}
#ifdef CONFIG_PCI
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			if (end > pci_reserve_end_pfn)
				init_free_pfn_range(pci_reserve_end_pfn, end);
			end = pci_reserve_start_pfn;
		}
#endif
		init_free_pfn_range(start, end);
	}
}

/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long vaddr, end;
#endif
#ifdef __tilegx__
	pud_t *pud;
#endif
	pgd_t *pgd_base = swapper_pg_dir;

	kernel_physical_mapping_init(pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
	/*
	 * Since GX allocates just one pmd_t array worth of vmalloc space,
	 * we go ahead and allocate it statically here, then share it
	 * globally.
	 * As a result we don't have to worry about any task
	 * changing init_mm once we get up and running, and there's no
	 * need for e.g. vmalloc_sync_all().
	 */
	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
	assign_pmd(pud, alloc_pmd());
#endif
}


/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
	max_mapnr = max_low_pfn;
#endif
}

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap"
		       " - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
		       FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	totalram_pages += free_all_bootmem();

	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();

	codesize = (unsigned long)&_etext - (unsigned long)&_text;
	datasize = (unsigned long)&_end - (unsigned long)&_sdata;
	initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext;
	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;

	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG " KMAP %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG " PKMAP %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
#ifdef CONFIG_HUGEVMAP
	printk(KERN_DEBUG " HUGEMAP %#lx - %#lx\n",
	       HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
#endif
	printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG " MEM%d %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	last = high_memory;
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG " LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last-1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}

/*
 * this is for the non-NUMA, single node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to the highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif

struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
/*
 * The __w1data area holds data that is only written during initialization,
 * and is read-only and thus freely cacheable thereafter. Fix the page
 * table entries that cover that region accordingly.
 */
static void mark_w1data_ro(void)
{
	/* Loop over page table entries */
	unsigned long addr = (unsigned long)__w1data_begin;
	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
		unsigned long pfn = kaddr_to_pfn((void *)addr);
		pte_t *ptep = virt_to_pte(NULL, addr);
		BUG_ON(pte_huge(*ptep));  /* not relevant for kdata_huge */
		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
	}
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
static long __write_once initfree;
#else
static long __write_once initfree = 1;
#endif

/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) == 0) {
		initfree = val;
		pr_info("initfree: %s free init pages\n",
			initfree ? "will" : "won't");
	}
	return 1;
}
__setup("initfree=", set_initfree);

static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table. We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		__ClearPageReserved(page);
		init_page_count(page);
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET;

	/*
	 * Evict the dirty initdata on the boot cpu, evict the w1data
	 * wherever it's homed, and evict all the init code everywhere.
	 * We are guaranteed that no one will touch the init pages any
	 * more, and although other cpus may be touching the w1data,
	 * we only actually change the caching on tile64, which won't
	 * be keeping local copies in the other tiles' caches anyway.
	 */
	homecache_evict(&cpu_cacheable_map);

	/* Free the data pages that we won't use again after init. */
	free_init_pages("unused kernel data",
			(unsigned long)_sinitdata,
			(unsigned long)_einitdata);

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from MEM_SV_INTRPT that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
			(unsigned long)_einittext - text_delta);

#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
	/*
	 * Upgrade the .w1data section to globally cached.
	 * We don't do this on tilepro, since the cache architecture
	 * pretty much makes it irrelevant, and in any case we end
	 * up having racing issues with other tiles that may touch
	 * the data after we flush the cache but before we update
	 * the PTEs and flush the TLBs, causing sharer shootdowns
	 * later. Even though this is to clean data, it seems like
	 * an unnecessary complication.
	 */
	mark_w1data_ro();
#endif

	/* Do a global TLB flush so everyone sees the changes. */
	flush_tlb_all();
}