mempolicy.c revision 699397499742d1245ea5d677a08fa265df666d2d
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernels lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/
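/*
 * For orientation only (not part of this file): user space reaches these
 * policies through the mbind(2)/set_mempolicy(2) system calls. A minimal
 * sketch, assuming nodes 0 and 1 are online:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *	// subsequent anonymous allocations interleave over nodes 0 and 1
 */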
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

#define PDprintk(fmt...)

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = ZONE_DMA;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
}
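/*
 * Worked example for bind_zonelist() below (an illustration, not code
 * exercised here): for nodes = {0,2} with policy_zone == ZONE_NORMAL,
 * the generated list is
 *
 *	NORMAL(node 0), NORMAL(node 2), <each lower zone likewise>, NULL
 *
 * i.e. the highest policied zone across all nodes first, then each
 * successively lower zone, skipping zones with no present pages.
 */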
/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = policy_zone;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(*nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (policy->v.zonelist == NULL) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-ENOMEM);
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);
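/*
 * The check_*_range() functions below walk the page tables top-down
 * (pgd -> pud -> pmd -> pte) in the usual four-level pattern;
 * check_pte_range() reports a non-zero result, turned into -EIO one
 * level up, as soon as it finds a page on a node failing the nodemask
 * test (subject to MPOL_MF_INVERT).
 */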
/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		unsigned int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (
		VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	return 1;
}
/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
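/*
 * mbind_range() example (illustrative): a single VMA [A,B) with start
 * and end strictly inside is split twice, leaving [A,start), [start,end)
 * and [end,B); only the middle VMA receives the new policy.
 */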
static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	cpuset_update_task_memory_state();
	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
		return -EINVAL;
	return mpol_check_policy(mode, nodes);
}


/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				 *nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of online map? */
		if (p->v.preferred_node < 0)
			*nodes = node_online_map;
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
long do_get_mempolicy(int *policy, nodemask_t *nmask,
			unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
		return -EINVAL;
	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same. If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory off that same node.
 *
 * A single scan of tmp is sufficient. As we go, we remember the
 * most recent <s, d> pair that moved (s != d). If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved. If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
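/*
 * Worked example (an illustration, not taken from the code): with
 * from = {0,1} and to = {2,3}, the first scan picks <0,2> (node 2 is
 * not in tmp), migrates node 0 to node 2 and clears 0 from tmp; the
 * next scan picks <1,3>, after which tmp is empty and the loop ends.
 */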
	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;

	return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

long do_mbind(unsigned long start, unsigned long len,
		unsigned long mode, nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nodes_addr(*nmask)[0]);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */
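/*
 * Example for get_nodes() below (assuming a 64-bit kernel): a user
 * maxnode of 65 describes bits 0..63, so after --maxnode we get
 * nlongs = 1 and endmask = ~0UL; a maxnode of 3 describes bits 0..1
 * and yields endmask = 0x3, masking off everything above the low two
 * bits of the last copied word.
 */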
/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}
asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_pid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err, pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/* Return effective policy for a VMA */
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else if (vma->vm_policy &&
				vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}
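/*
 * Precedence note for get_vma_policy() above: if the VMA has a
 * ->get_policy() op its result is used; otherwise a non-default
 * vma->vm_policy; otherwise the task policy; a NULL result falls back
 * to default_policy.
 */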
/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/* Lower zones don't get a policy applied */
		/* Careful: current->mems_allowed might have moved */
		if (gfp_zone(gfp) >= policy_zone)
			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
				return policy->v.zonelist;
		/*FALL THROUGH*/
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
	int pol = policy ? policy->policy : MPOL_DEFAULT;

	switch (pol) {
	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		return zone_to_nid(policy->v.zonelist->zones[0]);

	case MPOL_PREFERRED:
		if (policy->v.preferred_node >= 0)
			return policy->v.preferred_node;
		/* Fall through */

	default:
		return numa_node_id();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target = (unsigned)off % nnodes;
	int c;
	int nid = -1;

	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/* Return a zonelist suitable for a huge page allocation. */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
	}
	return zonelist_policy(GFP_HIGHUSER, pol);
}
#endif
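/*
 * Interleaving illustration: with pol->v.nodes = {0,2,3} (weight 3),
 * offset_il_node() above maps offsets 0,1,2,3,4,... to nodes
 * 0,2,3,0,2,... - so for VMA interleave a given page offset always
 * resolves to the same node, independent of the faulting task.
 */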
/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zl->zones[0])
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * 	alloc_page_vma	- Allocate a page for a VMA.
 *
 * 	@gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 * 	@vma:  Pointer to VMA or NULL if not available.
 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * 	This function allocates a page from the kernel page pool and applies
 *	a NUMA policy associated with the VMA or the current process.
 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
 *	mm_struct of the VMA to prevent it from going away. Should be used for
 *	all allocations for pages that will be mapped into
 * 	user space. Returns NULL when no page can be allocated.
 *
 *	Should be called with the mm_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	cpuset_update_task_memory_state();

	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		return alloc_page_interleave(gfp, 0, nid);
	}
	return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
}

/**
 * 	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *	%GFP_USER    user allocation,
 *      %GFP_KERNEL  kernel allocation,
 *      %GFP_HIGHMEM highmem allocation,
 *      %GFP_FS      don't call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool and, when not in
 *	interrupt context, apply the current process' NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);
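/*
 * Note: on NUMA kernels of this vintage alloc_pages() expands to
 * alloc_pages_current() (see <linux/gfp.h>), so the process policy
 * above covers most ordinary kernel page allocations.
 */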
/*
 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after its cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 */
void *cpuset_being_rebound;

/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	if (new->policy == MPOL_BIND) {
		int sz = ksize(old->v.zonelist);
		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
		if (!new->v.zonelist) {
			kmem_cache_free(policy_cache, new);
			return ERR_PTR(-ENOMEM);
		}
	}
	return new;
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_BIND: {
		int i;
		for (i = 0; a->v.zonelist->zones[i]; i++)
			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
				return 0;
		return b->v.zonelist->zones[i] == NULL;
	}
	default:
		BUG();
		return 0;
	}
}

/* Slow path of a mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	if (p->policy == MPOL_BIND)
		kfree(p->v.zonelist);
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
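/*
 * sp_lookup() example (illustrative): with ranges [0,2) and [2,5)
 * stored, sp_lookup(sp, 1, 4) first hits [2,5) while descending, then
 * walks rb_prev() back to [0,2), the lowest range still intersecting
 * [1,4), and returns it.
 */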
/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	PDprintk("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_free(n->policy);
	kmem_cache_free(sn_cache, n);
}

struct sp_node *
sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}
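/*
 * Example for shared_policy_replace() below: an existing node covering
 * [0,10) and a new policy for [3,6) leaves three nodes - [0,3) with the
 * old policy, [3,6) with the new one, and [6,10) (the preallocated
 * new2) carrying the old policy again.
 */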
/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_free(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *policy_nodes)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);

	if (policy != MPOL_DEFAULT) {
		struct mempolicy *newpol;

		/* Falls back to MPOL_DEFAULT on any error */
		newpol = mpol_new(policy, policy_nodes);
		if (!IS_ERR(newpol)) {
			/* Create pseudo-vma that contains just the policy */
			struct vm_area_struct pvma;

			memset(&pvma, 0, sizeof(struct vm_area_struct));
			/* Policy covers entire file */
			pvma.vm_end = TASK_SIZE;
			mpol_set_shared_policy(info, &pvma, newpol);
			mpol_free(newpol);
		}
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->policy : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_free(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL, NULL);

	/* Set interleaving policy for system init. This way not all
	   the data structures allocated at system boot end up in node zero. */

	if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, NULL);
}

/* Migrate a policy to a different set of nodes */
void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	nodemask_t *mpolmask;
	nodemask_t tmp;

	if (!pol)
		return;
	mpolmask = &pol->cpuset_mems_allowed;
	if (nodes_equal(*mpolmask, *newmask))
		return;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
		pol->v.nodes = tmp;
		*mpolmask = *newmask;
		current->il_next = node_remap(current->il_next,
						*mpolmask, *newmask);
		break;
	case MPOL_PREFERRED:
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						*mpolmask, *newmask);
		*mpolmask = *newmask;
		break;
	case MPOL_BIND: {
		nodemask_t nodes;
		struct zone **z;
		struct zonelist *zonelist;

		nodes_clear(nodes);
		for (z = pol->v.zonelist->zones; *z; z++)
			node_set(zone_to_nid(*z), nodes);
		nodes_remap(tmp, nodes, *mpolmask, *newmask);
		nodes = tmp;

		zonelist = bind_zonelist(&nodes);

		/* If no mem, then zonelist is NULL and we keep old zonelist.
		 * If that old zonelist has no remaining mems_allowed nodes,
		 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
		 */

		if (zonelist) {
			/* Good - got mem - substitute new zonelist */
			kfree(pol->v.zonelist);
			pol->v.zonelist = zonelist;
		}
		*mpolmask = *newmask;
		break;
	}
	default:
		BUG();
		break;
	}
}
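/*
 * Rebind example (illustrative): an MPOL_INTERLEAVE policy over {0,1}
 * whose cpuset mems_allowed moves from {0,1} to {2,3} is remapped by
 * mpol_rebind_policy() above to interleave over {2,3}.
 */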
/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char *policy_types[] = { "default", "prefer", "bind",
				      "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	int mode = pol ? pol->policy : MPOL_DEFAULT;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		get_zonemask(pol, &nodes);
		break;

	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		BUG();
		return -EFAULT;
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
}
#endif
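/*
 * show_numa_map() below emits one /proc/<pid>/numa_maps line per vma,
 * of roughly this shape (values and path illustrative only):
 *
 *	00400000 interleave=0-1 file=/bin/some-binary mapped=12 N0=6 N1=6
 */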
int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	mpol_to_str(buffer, sizeof(buffer),
			get_vma_policy(priv->task, vma, vma->vm_start));

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
			&node_online_map, MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_online_node(n)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}