slub.c revision dcc3be6a548a1e51adaab3be6d9dfbb68bc0e3a0
1/* 2 * SLUB: A slab allocator that limits cache line use instead of queuing 3 * objects in per cpu and per node lists. 4 * 5 * The allocator synchronizes using per slab locks or atomic operatios 6 * and only uses a centralized lock to manage a pool of partial slabs. 7 * 8 * (C) 2007 SGI, Christoph Lameter 9 * (C) 2011 Linux Foundation, Christoph Lameter 10 */ 11 12#include <linux/mm.h> 13#include <linux/swap.h> /* struct reclaim_state */ 14#include <linux/module.h> 15#include <linux/bit_spinlock.h> 16#include <linux/interrupt.h> 17#include <linux/bitops.h> 18#include <linux/slab.h> 19#include <linux/proc_fs.h> 20#include <linux/seq_file.h> 21#include <linux/kmemcheck.h> 22#include <linux/cpu.h> 23#include <linux/cpuset.h> 24#include <linux/mempolicy.h> 25#include <linux/ctype.h> 26#include <linux/debugobjects.h> 27#include <linux/kallsyms.h> 28#include <linux/memory.h> 29#include <linux/math64.h> 30#include <linux/fault-inject.h> 31#include <linux/stacktrace.h> 32 33#include <trace/events/kmem.h> 34 35/* 36 * Lock order: 37 * 1. slub_lock (Global Semaphore) 38 * 2. node->list_lock 39 * 3. slab_lock(page) (Only on some arches and for debugging) 40 * 41 * slub_lock 42 * 43 * The role of the slub_lock is to protect the list of all the slabs 44 * and to synchronize major metadata changes to slab cache structures. 45 * 46 * The slab_lock is only used for debugging and on arches that do not 47 * have the ability to do a cmpxchg_double. It only protects the second 48 * double word in the page struct. Meaning 49 * A. page->freelist -> List of object free in a page 50 * B. page->counters -> Counters of objects 51 * C. page->frozen -> frozen state 52 * 53 * If a slab is frozen then it is exempt from list management. It is not 54 * on any list. The processor that froze the slab is the one who can 55 * perform list operations on the page. Other processors may put objects 56 * onto the freelist but the processor that froze the slab is the only 57 * one that can retrieve the objects from the page's freelist. 58 * 59 * The list_lock protects the partial and full list on each node and 60 * the partial slab counter. If taken then no new slabs may be added or 61 * removed from the lists nor make the number of partial slabs be modified. 62 * (Note that the total number of slabs is an atomic value that may be 63 * modified without taking the list lock). 64 * 65 * The list_lock is a centralized lock and thus we avoid taking it as 66 * much as possible. As long as SLUB does not have to handle partial 67 * slabs, operations can continue without any centralized lock. F.e. 68 * allocating a long series of objects that fill up slabs does not require 69 * the list lock. 70 * Interrupts are disabled during allocation and deallocation in order to 71 * make the slab allocator safe to use in the context of an irq. In addition 72 * interrupts are disabled to ensure that the processor does not change 73 * while handling per_cpu slabs, due to kernel preemption. 74 * 75 * SLUB assigns one slab for allocation to each processor. 76 * Allocations only occur from these slabs called cpu slabs. 77 * 78 * Slabs with free elements are kept on a partial list and during regular 79 * operations no list for full slabs is used. If an object in a full slab is 80 * freed then the slab will show up again on the partial lists. 81 * We track full slabs for debugging purposes though because otherwise we 82 * cannot scan all objects. 83 * 84 * Slabs are freed when they become empty. 
Teardown and setup is
 * minimal so we rely on the page allocator's per-cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DEBUG_FREE)

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
157 */ 158#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 159 160/* 161 * Set of flags that will prevent slab merging 162 */ 163#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 164 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 165 SLAB_FAILSLAB) 166 167#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 168 SLAB_CACHE_DMA | SLAB_NOTRACK) 169 170#define OO_SHIFT 16 171#define OO_MASK ((1 << OO_SHIFT) - 1) 172#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */ 173 174/* Internal SLUB flags */ 175#define __OBJECT_POISON 0x80000000UL /* Poison object */ 176#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */ 177 178static int kmem_size = sizeof(struct kmem_cache); 179 180#ifdef CONFIG_SMP 181static struct notifier_block slab_notifier; 182#endif 183 184static enum { 185 DOWN, /* No slab functionality available */ 186 PARTIAL, /* Kmem_cache_node works */ 187 UP, /* Everything works but does not show up in sysfs */ 188 SYSFS /* Sysfs up */ 189} slab_state = DOWN; 190 191/* A list of all slab caches on the system */ 192static DECLARE_RWSEM(slub_lock); 193static LIST_HEAD(slab_caches); 194 195/* 196 * Tracking user of a slab. 197 */ 198#define TRACK_ADDRS_COUNT 16 199struct track { 200 unsigned long addr; /* Called from address */ 201#ifdef CONFIG_STACKTRACE 202 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */ 203#endif 204 int cpu; /* Was running on cpu */ 205 int pid; /* Pid context */ 206 unsigned long when; /* When did the operation occur */ 207}; 208 209enum track_item { TRACK_ALLOC, TRACK_FREE }; 210 211#ifdef CONFIG_SYSFS 212static int sysfs_slab_add(struct kmem_cache *); 213static int sysfs_slab_alias(struct kmem_cache *, const char *); 214static void sysfs_slab_remove(struct kmem_cache *); 215 216#else 217static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 218static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 219 { return 0; } 220static inline void sysfs_slab_remove(struct kmem_cache *s) 221{ 222 kfree(s->name); 223 kfree(s); 224} 225 226#endif 227 228static inline void stat(const struct kmem_cache *s, enum stat_item si) 229{ 230#ifdef CONFIG_SLUB_STATS 231 __this_cpu_inc(s->cpu_slab->stat[si]); 232#endif 233} 234 235/******************************************************************** 236 * Core slab cache functions 237 *******************************************************************/ 238 239int slab_is_available(void) 240{ 241 return slab_state >= UP; 242} 243 244static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 245{ 246 return s->node[node]; 247} 248 249/* Verify that a pointer has an address that is valid within a slab page */ 250static inline int check_valid_pointer(struct kmem_cache *s, 251 struct page *page, const void *object) 252{ 253 void *base; 254 255 if (!object) 256 return 1; 257 258 base = page_address(page); 259 if (object < base || object >= base + page->objects * s->size || 260 (object - base) % s->size) { 261 return 0; 262 } 263 264 return 1; 265} 266 267static inline void *get_freepointer(struct kmem_cache *s, void *object) 268{ 269 return *(void **)(object + s->offset); 270} 271 272static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 273{ 274 void *p; 275 276#ifdef CONFIG_DEBUG_PAGEALLOC 277 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p)); 278#else 279 p = get_freepointer(s, object); 280#endif 281 return p; 282} 283 284static inline void 
set_freepointer(struct kmem_cache *s, void *object, void *fp) 285{ 286 *(void **)(object + s->offset) = fp; 287} 288 289/* Loop over all objects in a slab */ 290#define for_each_object(__p, __s, __addr, __objects) \ 291 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 292 __p += (__s)->size) 293 294/* Determine object index from a given position */ 295static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 296{ 297 return (p - addr) / s->size; 298} 299 300static inline size_t slab_ksize(const struct kmem_cache *s) 301{ 302#ifdef CONFIG_SLUB_DEBUG 303 /* 304 * Debugging requires use of the padding between object 305 * and whatever may come after it. 306 */ 307 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 308 return s->objsize; 309 310#endif 311 /* 312 * If we have the need to store the freelist pointer 313 * back there or track user information then we can 314 * only use the space before that information. 315 */ 316 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 317 return s->inuse; 318 /* 319 * Else we can use all the padding etc for the allocation 320 */ 321 return s->size; 322} 323 324static inline int order_objects(int order, unsigned long size, int reserved) 325{ 326 return ((PAGE_SIZE << order) - reserved) / size; 327} 328 329static inline struct kmem_cache_order_objects oo_make(int order, 330 unsigned long size, int reserved) 331{ 332 struct kmem_cache_order_objects x = { 333 (order << OO_SHIFT) + order_objects(order, size, reserved) 334 }; 335 336 return x; 337} 338 339static inline int oo_order(struct kmem_cache_order_objects x) 340{ 341 return x.x >> OO_SHIFT; 342} 343 344static inline int oo_objects(struct kmem_cache_order_objects x) 345{ 346 return x.x & OO_MASK; 347} 348 349/* 350 * Per slab locking using the pagelock 351 */ 352static __always_inline void slab_lock(struct page *page) 353{ 354 bit_spin_lock(PG_locked, &page->flags); 355} 356 357static __always_inline void slab_unlock(struct page *page) 358{ 359 __bit_spin_unlock(PG_locked, &page->flags); 360} 361 362/* Interrupts must be disabled (for the fallback code to work right) */ 363static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 364 void *freelist_old, unsigned long counters_old, 365 void *freelist_new, unsigned long counters_new, 366 const char *n) 367{ 368 VM_BUG_ON(!irqs_disabled()); 369#ifdef CONFIG_CMPXCHG_DOUBLE 370 if (s->flags & __CMPXCHG_DOUBLE) { 371 if (cmpxchg_double(&page->freelist, 372 freelist_old, counters_old, 373 freelist_new, counters_new)) 374 return 1; 375 } else 376#endif 377 { 378 slab_lock(page); 379 if (page->freelist == freelist_old && page->counters == counters_old) { 380 page->freelist = freelist_new; 381 page->counters = counters_new; 382 slab_unlock(page); 383 return 1; 384 } 385 slab_unlock(page); 386 } 387 388 cpu_relax(); 389 stat(s, CMPXCHG_DOUBLE_FAIL); 390 391#ifdef SLUB_DEBUG_CMPXCHG 392 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name); 393#endif 394 395 return 0; 396} 397 398static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 399 void *freelist_old, unsigned long counters_old, 400 void *freelist_new, unsigned long counters_new, 401 const char *n) 402{ 403#ifdef CONFIG_CMPXCHG_DOUBLE 404 if (s->flags & __CMPXCHG_DOUBLE) { 405 if (cmpxchg_double(&page->freelist, 406 freelist_old, counters_old, 407 freelist_new, counters_new)) 408 return 1; 409 } else 410#endif 411 { 412 unsigned long flags; 413 414 local_irq_save(flags); 415 slab_lock(page); 416 if (page->freelist == 
freelist_old && page->counters == counters_old) { 417 page->freelist = freelist_new; 418 page->counters = counters_new; 419 slab_unlock(page); 420 local_irq_restore(flags); 421 return 1; 422 } 423 slab_unlock(page); 424 local_irq_restore(flags); 425 } 426 427 cpu_relax(); 428 stat(s, CMPXCHG_DOUBLE_FAIL); 429 430#ifdef SLUB_DEBUG_CMPXCHG 431 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name); 432#endif 433 434 return 0; 435} 436 437#ifdef CONFIG_SLUB_DEBUG 438/* 439 * Determine a map of object in use on a page. 440 * 441 * Node listlock must be held to guarantee that the page does 442 * not vanish from under us. 443 */ 444static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) 445{ 446 void *p; 447 void *addr = page_address(page); 448 449 for (p = page->freelist; p; p = get_freepointer(s, p)) 450 set_bit(slab_index(p, s, addr), map); 451} 452 453/* 454 * Debug settings: 455 */ 456#ifdef CONFIG_SLUB_DEBUG_ON 457static int slub_debug = DEBUG_DEFAULT_FLAGS; 458#else 459static int slub_debug; 460#endif 461 462static char *slub_debug_slabs; 463static int disable_higher_order_debug; 464 465/* 466 * Object debugging 467 */ 468static void print_section(char *text, u8 *addr, unsigned int length) 469{ 470 int i, offset; 471 int newline = 1; 472 char ascii[17]; 473 474 ascii[16] = 0; 475 476 for (i = 0; i < length; i++) { 477 if (newline) { 478 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 479 newline = 0; 480 } 481 printk(KERN_CONT " %02x", addr[i]); 482 offset = i % 16; 483 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.'; 484 if (offset == 15) { 485 printk(KERN_CONT " %s\n", ascii); 486 newline = 1; 487 } 488 } 489 if (!newline) { 490 i %= 16; 491 while (i < 16) { 492 printk(KERN_CONT " "); 493 ascii[i] = ' '; 494 i++; 495 } 496 printk(KERN_CONT " %s\n", ascii); 497 } 498} 499 500static struct track *get_track(struct kmem_cache *s, void *object, 501 enum track_item alloc) 502{ 503 struct track *p; 504 505 if (s->offset) 506 p = object + s->offset + sizeof(void *); 507 else 508 p = object + s->inuse; 509 510 return p + alloc; 511} 512 513static void set_track(struct kmem_cache *s, void *object, 514 enum track_item alloc, unsigned long addr) 515{ 516 struct track *p = get_track(s, object, alloc); 517 518 if (addr) { 519#ifdef CONFIG_STACKTRACE 520 struct stack_trace trace; 521 int i; 522 523 trace.nr_entries = 0; 524 trace.max_entries = TRACK_ADDRS_COUNT; 525 trace.entries = p->addrs; 526 trace.skip = 3; 527 save_stack_trace(&trace); 528 529 /* See rant in lockdep.c */ 530 if (trace.nr_entries != 0 && 531 trace.entries[trace.nr_entries - 1] == ULONG_MAX) 532 trace.nr_entries--; 533 534 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++) 535 p->addrs[i] = 0; 536#endif 537 p->addr = addr; 538 p->cpu = smp_processor_id(); 539 p->pid = current->pid; 540 p->when = jiffies; 541 } else 542 memset(p, 0, sizeof(struct track)); 543} 544 545static void init_tracking(struct kmem_cache *s, void *object) 546{ 547 if (!(s->flags & SLAB_STORE_USER)) 548 return; 549 550 set_track(s, object, TRACK_FREE, 0UL); 551 set_track(s, object, TRACK_ALLOC, 0UL); 552} 553 554static void print_track(const char *s, struct track *t) 555{ 556 if (!t->addr) 557 return; 558 559 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 560 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); 561#ifdef CONFIG_STACKTRACE 562 { 563 int i; 564 for (i = 0; i < TRACK_ADDRS_COUNT; i++) 565 if (t->addrs[i]) 566 printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]); 567 else 568 break; 569 } 570#endif 571} 
572 573static void print_tracking(struct kmem_cache *s, void *object) 574{ 575 if (!(s->flags & SLAB_STORE_USER)) 576 return; 577 578 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 579 print_track("Freed", get_track(s, object, TRACK_FREE)); 580} 581 582static void print_page_info(struct page *page) 583{ 584 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 585 page, page->objects, page->inuse, page->freelist, page->flags); 586 587} 588 589static void slab_bug(struct kmem_cache *s, char *fmt, ...) 590{ 591 va_list args; 592 char buf[100]; 593 594 va_start(args, fmt); 595 vsnprintf(buf, sizeof(buf), fmt, args); 596 va_end(args); 597 printk(KERN_ERR "========================================" 598 "=====================================\n"); 599 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 600 printk(KERN_ERR "----------------------------------------" 601 "-------------------------------------\n\n"); 602} 603 604static void slab_fix(struct kmem_cache *s, char *fmt, ...) 605{ 606 va_list args; 607 char buf[100]; 608 609 va_start(args, fmt); 610 vsnprintf(buf, sizeof(buf), fmt, args); 611 va_end(args); 612 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 613} 614 615static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 616{ 617 unsigned int off; /* Offset of last byte */ 618 u8 *addr = page_address(page); 619 620 print_tracking(s, p); 621 622 print_page_info(page); 623 624 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 625 p, p - addr, get_freepointer(s, p)); 626 627 if (p > addr + 16) 628 print_section("Bytes b4", p - 16, 16); 629 630 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE)); 631 632 if (s->flags & SLAB_RED_ZONE) 633 print_section("Redzone", p + s->objsize, 634 s->inuse - s->objsize); 635 636 if (s->offset) 637 off = s->offset + sizeof(void *); 638 else 639 off = s->inuse; 640 641 if (s->flags & SLAB_STORE_USER) 642 off += 2 * sizeof(struct track); 643 644 if (off != s->size) 645 /* Beginning of the filler is the free pointer */ 646 print_section("Padding", p + off, s->size - off); 647 648 dump_stack(); 649} 650 651static void object_err(struct kmem_cache *s, struct page *page, 652 u8 *object, char *reason) 653{ 654 slab_bug(s, "%s", reason); 655 print_trailer(s, page, object); 656} 657 658static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 
659{ 660 va_list args; 661 char buf[100]; 662 663 va_start(args, fmt); 664 vsnprintf(buf, sizeof(buf), fmt, args); 665 va_end(args); 666 slab_bug(s, "%s", buf); 667 print_page_info(page); 668 dump_stack(); 669} 670 671static void init_object(struct kmem_cache *s, void *object, u8 val) 672{ 673 u8 *p = object; 674 675 if (s->flags & __OBJECT_POISON) { 676 memset(p, POISON_FREE, s->objsize - 1); 677 p[s->objsize - 1] = POISON_END; 678 } 679 680 if (s->flags & SLAB_RED_ZONE) 681 memset(p + s->objsize, val, s->inuse - s->objsize); 682} 683 684static u8 *check_bytes8(u8 *start, u8 value, unsigned int bytes) 685{ 686 while (bytes) { 687 if (*start != value) 688 return start; 689 start++; 690 bytes--; 691 } 692 return NULL; 693} 694 695static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes) 696{ 697 u64 value64; 698 unsigned int words, prefix; 699 700 if (bytes <= 16) 701 return check_bytes8(start, value, bytes); 702 703 value64 = value | value << 8 | value << 16 | value << 24; 704 value64 = (value64 & 0xffffffff) | value64 << 32; 705 prefix = 8 - ((unsigned long)start) % 8; 706 707 if (prefix) { 708 u8 *r = check_bytes8(start, value, prefix); 709 if (r) 710 return r; 711 start += prefix; 712 bytes -= prefix; 713 } 714 715 words = bytes / 8; 716 717 while (words) { 718 if (*(u64 *)start != value64) 719 return check_bytes8(start, value, 8); 720 start += 8; 721 words--; 722 } 723 724 return check_bytes8(start, value, bytes % 8); 725} 726 727static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 728 void *from, void *to) 729{ 730 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 731 memset(from, data, to - from); 732} 733 734static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 735 u8 *object, char *what, 736 u8 *start, unsigned int value, unsigned int bytes) 737{ 738 u8 *fault; 739 u8 *end; 740 741 fault = check_bytes(start, value, bytes); 742 if (!fault) 743 return 1; 744 745 end = start + bytes; 746 while (end > fault && end[-1] == value) 747 end--; 748 749 slab_bug(s, "%s overwritten", what); 750 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", 751 fault, end - 1, fault[0], value); 752 print_trailer(s, page, object); 753 754 restore_bytes(s, what, value, fault, end); 755 return 0; 756} 757 758/* 759 * Object layout: 760 * 761 * object address 762 * Bytes of the object to be managed. 763 * If the freepointer may overlay the object then the free 764 * pointer is the first word of the object. 765 * 766 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 767 * 0xa5 (POISON_END) 768 * 769 * object + s->objsize 770 * Padding to reach word boundary. This is also used for Redzoning. 771 * Padding is extended by another word if Redzoning is enabled and 772 * objsize == inuse. 773 * 774 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 775 * 0xcc (RED_ACTIVE) for objects in use. 776 * 777 * object + s->inuse 778 * Meta data starts here. 779 * 780 * A. Free pointer (if we cannot overwrite object on free) 781 * B. Tracking data for SLAB_STORE_USER 782 * C. Padding to reach required alignment boundary or at mininum 783 * one word if debugging is on to be able to detect writes 784 * before the word boundary. 785 * 786 * Padding is done using 0x5a (POISON_INUSE) 787 * 788 * object + s->size 789 * Nothing is used beyond s->size. 790 * 791 * If slabcaches are merged then the objsize and inuse boundaries are mostly 792 * ignored. 
And therefore no slab options that rely on these boundaries 793 * may be used with merged slabcaches. 794 */ 795 796static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 797{ 798 unsigned long off = s->inuse; /* The end of info */ 799 800 if (s->offset) 801 /* Freepointer is placed after the object. */ 802 off += sizeof(void *); 803 804 if (s->flags & SLAB_STORE_USER) 805 /* We also have user information there */ 806 off += 2 * sizeof(struct track); 807 808 if (s->size == off) 809 return 1; 810 811 return check_bytes_and_report(s, page, p, "Object padding", 812 p + off, POISON_INUSE, s->size - off); 813} 814 815/* Check the pad bytes at the end of a slab page */ 816static int slab_pad_check(struct kmem_cache *s, struct page *page) 817{ 818 u8 *start; 819 u8 *fault; 820 u8 *end; 821 int length; 822 int remainder; 823 824 if (!(s->flags & SLAB_POISON)) 825 return 1; 826 827 start = page_address(page); 828 length = (PAGE_SIZE << compound_order(page)) - s->reserved; 829 end = start + length; 830 remainder = length % s->size; 831 if (!remainder) 832 return 1; 833 834 fault = check_bytes(end - remainder, POISON_INUSE, remainder); 835 if (!fault) 836 return 1; 837 while (end > fault && end[-1] == POISON_INUSE) 838 end--; 839 840 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 841 print_section("Padding", end - remainder, remainder); 842 843 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); 844 return 0; 845} 846 847static int check_object(struct kmem_cache *s, struct page *page, 848 void *object, u8 val) 849{ 850 u8 *p = object; 851 u8 *endobject = object + s->objsize; 852 853 if (s->flags & SLAB_RED_ZONE) { 854 if (!check_bytes_and_report(s, page, object, "Redzone", 855 endobject, val, s->inuse - s->objsize)) 856 return 0; 857 } else { 858 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { 859 check_bytes_and_report(s, page, p, "Alignment padding", 860 endobject, POISON_INUSE, s->inuse - s->objsize); 861 } 862 } 863 864 if (s->flags & SLAB_POISON) { 865 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 866 (!check_bytes_and_report(s, page, p, "Poison", p, 867 POISON_FREE, s->objsize - 1) || 868 !check_bytes_and_report(s, page, p, "Poison", 869 p + s->objsize - 1, POISON_END, 1))) 870 return 0; 871 /* 872 * check_pad_bytes cleans up on its own. 873 */ 874 check_pad_bytes(s, page, p); 875 } 876 877 if (!s->offset && val == SLUB_RED_ACTIVE) 878 /* 879 * Object and freepointer overlap. Cannot check 880 * freepointer while object is allocated. 881 */ 882 return 1; 883 884 /* Check free pointer validity */ 885 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 886 object_err(s, page, p, "Freepointer corrupt"); 887 /* 888 * No choice but to zap it and thus lose the remainder 889 * of the free objects in this slab. May cause 890 * another error because the object count is now wrong. 
891 */ 892 set_freepointer(s, p, NULL); 893 return 0; 894 } 895 return 1; 896} 897 898static int check_slab(struct kmem_cache *s, struct page *page) 899{ 900 int maxobj; 901 902 VM_BUG_ON(!irqs_disabled()); 903 904 if (!PageSlab(page)) { 905 slab_err(s, page, "Not a valid slab page"); 906 return 0; 907 } 908 909 maxobj = order_objects(compound_order(page), s->size, s->reserved); 910 if (page->objects > maxobj) { 911 slab_err(s, page, "objects %u > max %u", 912 s->name, page->objects, maxobj); 913 return 0; 914 } 915 if (page->inuse > page->objects) { 916 slab_err(s, page, "inuse %u > max %u", 917 s->name, page->inuse, page->objects); 918 return 0; 919 } 920 /* Slab_pad_check fixes things up after itself */ 921 slab_pad_check(s, page); 922 return 1; 923} 924 925/* 926 * Determine if a certain object on a page is on the freelist. Must hold the 927 * slab lock to guarantee that the chains are in a consistent state. 928 */ 929static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 930{ 931 int nr = 0; 932 void *fp; 933 void *object = NULL; 934 unsigned long max_objects; 935 936 fp = page->freelist; 937 while (fp && nr <= page->objects) { 938 if (fp == search) 939 return 1; 940 if (!check_valid_pointer(s, page, fp)) { 941 if (object) { 942 object_err(s, page, object, 943 "Freechain corrupt"); 944 set_freepointer(s, object, NULL); 945 break; 946 } else { 947 slab_err(s, page, "Freepointer corrupt"); 948 page->freelist = NULL; 949 page->inuse = page->objects; 950 slab_fix(s, "Freelist cleared"); 951 return 0; 952 } 953 break; 954 } 955 object = fp; 956 fp = get_freepointer(s, object); 957 nr++; 958 } 959 960 max_objects = order_objects(compound_order(page), s->size, s->reserved); 961 if (max_objects > MAX_OBJS_PER_PAGE) 962 max_objects = MAX_OBJS_PER_PAGE; 963 964 if (page->objects != max_objects) { 965 slab_err(s, page, "Wrong number of objects. Found %d but " 966 "should be %d", page->objects, max_objects); 967 page->objects = max_objects; 968 slab_fix(s, "Number of objects adjusted."); 969 } 970 if (page->inuse != page->objects - nr) { 971 slab_err(s, page, "Wrong object count. Counter is %d but " 972 "counted were %d", page->inuse, page->objects - nr); 973 page->inuse = page->objects - nr; 974 slab_fix(s, "Object count adjusted."); 975 } 976 return search == NULL; 977} 978 979static void trace(struct kmem_cache *s, struct page *page, void *object, 980 int alloc) 981{ 982 if (s->flags & SLAB_TRACE) { 983 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 984 s->name, 985 alloc ? "alloc" : "free", 986 object, page->inuse, 987 page->freelist); 988 989 if (!alloc) 990 print_section("Object", (void *)object, s->objsize); 991 992 dump_stack(); 993 } 994} 995 996/* 997 * Hooks for other subsystems that check memory allocations. In a typical 998 * production configuration these hooks all should produce no code at all. 
999 */ 1000static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1001{ 1002 flags &= gfp_allowed_mask; 1003 lockdep_trace_alloc(flags); 1004 might_sleep_if(flags & __GFP_WAIT); 1005 1006 return should_failslab(s->objsize, flags, s->flags); 1007} 1008 1009static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) 1010{ 1011 flags &= gfp_allowed_mask; 1012 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); 1013 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags); 1014} 1015 1016static inline void slab_free_hook(struct kmem_cache *s, void *x) 1017{ 1018 kmemleak_free_recursive(x, s->flags); 1019 1020 /* 1021 * Trouble is that we may no longer disable interupts in the fast path 1022 * So in order to make the debug calls that expect irqs to be 1023 * disabled we need to disable interrupts temporarily. 1024 */ 1025#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP) 1026 { 1027 unsigned long flags; 1028 1029 local_irq_save(flags); 1030 kmemcheck_slab_free(s, x, s->objsize); 1031 debug_check_no_locks_freed(x, s->objsize); 1032 local_irq_restore(flags); 1033 } 1034#endif 1035 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1036 debug_check_no_obj_freed(x, s->objsize); 1037} 1038 1039/* 1040 * Tracking of fully allocated slabs for debugging purposes. 1041 * 1042 * list_lock must be held. 1043 */ 1044static void add_full(struct kmem_cache *s, 1045 struct kmem_cache_node *n, struct page *page) 1046{ 1047 if (!(s->flags & SLAB_STORE_USER)) 1048 return; 1049 1050 list_add(&page->lru, &n->full); 1051} 1052 1053/* 1054 * list_lock must be held. 1055 */ 1056static void remove_full(struct kmem_cache *s, struct page *page) 1057{ 1058 if (!(s->flags & SLAB_STORE_USER)) 1059 return; 1060 1061 list_del(&page->lru); 1062} 1063 1064/* Tracking of the number of slabs for debugging purposes */ 1065static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1066{ 1067 struct kmem_cache_node *n = get_node(s, node); 1068 1069 return atomic_long_read(&n->nr_slabs); 1070} 1071 1072static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1073{ 1074 return atomic_long_read(&n->nr_slabs); 1075} 1076 1077static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1078{ 1079 struct kmem_cache_node *n = get_node(s, node); 1080 1081 /* 1082 * May be called early in order to allocate a slab for the 1083 * kmem_cache_node structure. Solve the chicken-egg 1084 * dilemma by deferring the increment of the count during 1085 * bootstrap (see early_kmem_cache_node_alloc). 
1086 */ 1087 if (n) { 1088 atomic_long_inc(&n->nr_slabs); 1089 atomic_long_add(objects, &n->total_objects); 1090 } 1091} 1092static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1093{ 1094 struct kmem_cache_node *n = get_node(s, node); 1095 1096 atomic_long_dec(&n->nr_slabs); 1097 atomic_long_sub(objects, &n->total_objects); 1098} 1099 1100/* Object debug checks for alloc/free paths */ 1101static void setup_object_debug(struct kmem_cache *s, struct page *page, 1102 void *object) 1103{ 1104 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 1105 return; 1106 1107 init_object(s, object, SLUB_RED_INACTIVE); 1108 init_tracking(s, object); 1109} 1110 1111static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page, 1112 void *object, unsigned long addr) 1113{ 1114 if (!check_slab(s, page)) 1115 goto bad; 1116 1117 if (!check_valid_pointer(s, page, object)) { 1118 object_err(s, page, object, "Freelist Pointer check fails"); 1119 goto bad; 1120 } 1121 1122 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 1123 goto bad; 1124 1125 /* Success perform special debug activities for allocs */ 1126 if (s->flags & SLAB_STORE_USER) 1127 set_track(s, object, TRACK_ALLOC, addr); 1128 trace(s, page, object, 1); 1129 init_object(s, object, SLUB_RED_ACTIVE); 1130 return 1; 1131 1132bad: 1133 if (PageSlab(page)) { 1134 /* 1135 * If this is a slab page then lets do the best we can 1136 * to avoid issues in the future. Marking all objects 1137 * as used avoids touching the remaining objects. 1138 */ 1139 slab_fix(s, "Marking all objects used"); 1140 page->inuse = page->objects; 1141 page->freelist = NULL; 1142 } 1143 return 0; 1144} 1145 1146static noinline int free_debug_processing(struct kmem_cache *s, 1147 struct page *page, void *object, unsigned long addr) 1148{ 1149 unsigned long flags; 1150 int rc = 0; 1151 1152 local_irq_save(flags); 1153 slab_lock(page); 1154 1155 if (!check_slab(s, page)) 1156 goto fail; 1157 1158 if (!check_valid_pointer(s, page, object)) { 1159 slab_err(s, page, "Invalid object pointer 0x%p", object); 1160 goto fail; 1161 } 1162 1163 if (on_freelist(s, page, object)) { 1164 object_err(s, page, object, "Object already free"); 1165 goto fail; 1166 } 1167 1168 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 1169 goto out; 1170 1171 if (unlikely(s != page->slab)) { 1172 if (!PageSlab(page)) { 1173 slab_err(s, page, "Attempt to free object(0x%p) " 1174 "outside of slab", object); 1175 } else if (!page->slab) { 1176 printk(KERN_ERR 1177 "SLUB <none>: no slab for object 0x%p.\n", 1178 object); 1179 dump_stack(); 1180 } else 1181 object_err(s, page, object, 1182 "page slab pointer corrupt."); 1183 goto fail; 1184 } 1185 1186 if (s->flags & SLAB_STORE_USER) 1187 set_track(s, object, TRACK_FREE, addr); 1188 trace(s, page, object, 0); 1189 init_object(s, object, SLUB_RED_INACTIVE); 1190 rc = 1; 1191out: 1192 slab_unlock(page); 1193 local_irq_restore(flags); 1194 return rc; 1195 1196fail: 1197 slab_fix(s, "Object at 0x%p not freed", object); 1198 goto out; 1199} 1200 1201static int __init setup_slub_debug(char *str) 1202{ 1203 slub_debug = DEBUG_DEFAULT_FLAGS; 1204 if (*str++ != '=' || !*str) 1205 /* 1206 * No options specified. Switch on full debugging. 1207 */ 1208 goto out; 1209 1210 if (*str == ',') 1211 /* 1212 * No options but restriction on slabs. This means full 1213 * debugging for slabs matching a pattern. 
1214 */ 1215 goto check_slabs; 1216 1217 if (tolower(*str) == 'o') { 1218 /* 1219 * Avoid enabling debugging on caches if its minimum order 1220 * would increase as a result. 1221 */ 1222 disable_higher_order_debug = 1; 1223 goto out; 1224 } 1225 1226 slub_debug = 0; 1227 if (*str == '-') 1228 /* 1229 * Switch off all debugging measures. 1230 */ 1231 goto out; 1232 1233 /* 1234 * Determine which debug features should be switched on 1235 */ 1236 for (; *str && *str != ','; str++) { 1237 switch (tolower(*str)) { 1238 case 'f': 1239 slub_debug |= SLAB_DEBUG_FREE; 1240 break; 1241 case 'z': 1242 slub_debug |= SLAB_RED_ZONE; 1243 break; 1244 case 'p': 1245 slub_debug |= SLAB_POISON; 1246 break; 1247 case 'u': 1248 slub_debug |= SLAB_STORE_USER; 1249 break; 1250 case 't': 1251 slub_debug |= SLAB_TRACE; 1252 break; 1253 case 'a': 1254 slub_debug |= SLAB_FAILSLAB; 1255 break; 1256 default: 1257 printk(KERN_ERR "slub_debug option '%c' " 1258 "unknown. skipped\n", *str); 1259 } 1260 } 1261 1262check_slabs: 1263 if (*str == ',') 1264 slub_debug_slabs = str + 1; 1265out: 1266 return 1; 1267} 1268 1269__setup("slub_debug", setup_slub_debug); 1270 1271static unsigned long kmem_cache_flags(unsigned long objsize, 1272 unsigned long flags, const char *name, 1273 void (*ctor)(void *)) 1274{ 1275 /* 1276 * Enable debugging if selected on the kernel commandline. 1277 */ 1278 if (slub_debug && (!slub_debug_slabs || 1279 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))) 1280 flags |= slub_debug; 1281 1282 return flags; 1283} 1284#else 1285static inline void setup_object_debug(struct kmem_cache *s, 1286 struct page *page, void *object) {} 1287 1288static inline int alloc_debug_processing(struct kmem_cache *s, 1289 struct page *page, void *object, unsigned long addr) { return 0; } 1290 1291static inline int free_debug_processing(struct kmem_cache *s, 1292 struct page *page, void *object, unsigned long addr) { return 0; } 1293 1294static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1295 { return 1; } 1296static inline int check_object(struct kmem_cache *s, struct page *page, 1297 void *object, u8 val) { return 1; } 1298static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1299 struct page *page) {} 1300static inline void remove_full(struct kmem_cache *s, struct page *page) {} 1301static inline unsigned long kmem_cache_flags(unsigned long objsize, 1302 unsigned long flags, const char *name, 1303 void (*ctor)(void *)) 1304{ 1305 return flags; 1306} 1307#define slub_debug 0 1308 1309#define disable_higher_order_debug 0 1310 1311static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1312 { return 0; } 1313static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1314 { return 0; } 1315static inline void inc_slabs_node(struct kmem_cache *s, int node, 1316 int objects) {} 1317static inline void dec_slabs_node(struct kmem_cache *s, int node, 1318 int objects) {} 1319 1320static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1321 { return 0; } 1322 1323static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1324 void *object) {} 1325 1326static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1327 1328#endif /* CONFIG_SLUB_DEBUG */ 1329 1330/* 1331 * Slab allocation and freeing 1332 */ 1333static inline struct page *alloc_slab_page(gfp_t flags, int node, 1334 struct kmem_cache_order_objects oo) 1335{ 1336 int order = oo_order(oo); 1337 1338 flags |= __GFP_NOTRACK; 1339 1340 if (node == 
NUMA_NO_NODE) 1341 return alloc_pages(flags, order); 1342 else 1343 return alloc_pages_exact_node(node, flags, order); 1344} 1345 1346static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1347{ 1348 struct page *page; 1349 struct kmem_cache_order_objects oo = s->oo; 1350 gfp_t alloc_gfp; 1351 1352 flags &= gfp_allowed_mask; 1353 1354 if (flags & __GFP_WAIT) 1355 local_irq_enable(); 1356 1357 flags |= s->allocflags; 1358 1359 /* 1360 * Let the initial higher-order allocation fail under memory pressure 1361 * so we fall-back to the minimum order allocation. 1362 */ 1363 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1364 1365 page = alloc_slab_page(alloc_gfp, node, oo); 1366 if (unlikely(!page)) { 1367 oo = s->min; 1368 /* 1369 * Allocation may have failed due to fragmentation. 1370 * Try a lower order alloc if possible 1371 */ 1372 page = alloc_slab_page(flags, node, oo); 1373 1374 if (page) 1375 stat(s, ORDER_FALLBACK); 1376 } 1377 1378 if (flags & __GFP_WAIT) 1379 local_irq_disable(); 1380 1381 if (!page) 1382 return NULL; 1383 1384 if (kmemcheck_enabled 1385 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { 1386 int pages = 1 << oo_order(oo); 1387 1388 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node); 1389 1390 /* 1391 * Objects from caches that have a constructor don't get 1392 * cleared when they're allocated, so we need to do it here. 1393 */ 1394 if (s->ctor) 1395 kmemcheck_mark_uninitialized_pages(page, pages); 1396 else 1397 kmemcheck_mark_unallocated_pages(page, pages); 1398 } 1399 1400 page->objects = oo_objects(oo); 1401 mod_zone_page_state(page_zone(page), 1402 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 1403 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1404 1 << oo_order(oo)); 1405 1406 return page; 1407} 1408 1409static void setup_object(struct kmem_cache *s, struct page *page, 1410 void *object) 1411{ 1412 setup_object_debug(s, page, object); 1413 if (unlikely(s->ctor)) 1414 s->ctor(object); 1415} 1416 1417static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1418{ 1419 struct page *page; 1420 void *start; 1421 void *last; 1422 void *p; 1423 1424 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1425 1426 page = allocate_slab(s, 1427 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1428 if (!page) 1429 goto out; 1430 1431 inc_slabs_node(s, page_to_nid(page), page->objects); 1432 page->slab = s; 1433 page->flags |= 1 << PG_slab; 1434 1435 start = page_address(page); 1436 1437 if (unlikely(s->flags & SLAB_POISON)) 1438 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1439 1440 last = start; 1441 for_each_object(p, s, start, page->objects) { 1442 setup_object(s, page, last); 1443 set_freepointer(s, last, p); 1444 last = p; 1445 } 1446 setup_object(s, page, last); 1447 set_freepointer(s, last, NULL); 1448 1449 page->freelist = start; 1450 page->inuse = page->objects; 1451 page->frozen = 1; 1452out: 1453 return page; 1454} 1455 1456static void __free_slab(struct kmem_cache *s, struct page *page) 1457{ 1458 int order = compound_order(page); 1459 int pages = 1 << order; 1460 1461 if (kmem_cache_debug(s)) { 1462 void *p; 1463 1464 slab_pad_check(s, page); 1465 for_each_object(p, s, page_address(page), 1466 page->objects) 1467 check_object(s, page, p, SLUB_RED_INACTIVE); 1468 } 1469 1470 kmemcheck_free_shadow(page, compound_order(page)); 1471 1472 mod_zone_page_state(page_zone(page), 1473 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1474 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1475 -pages); 1476 1477 __ClearPageSlab(page); 1478 reset_page_mapcount(page); 1479 if (current->reclaim_state) 1480 current->reclaim_state->reclaimed_slab += pages; 1481 __free_pages(page, order); 1482} 1483 1484#define need_reserve_slab_rcu \ 1485 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) 1486 1487static void rcu_free_slab(struct rcu_head *h) 1488{ 1489 struct page *page; 1490 1491 if (need_reserve_slab_rcu) 1492 page = virt_to_head_page(h); 1493 else 1494 page = container_of((struct list_head *)h, struct page, lru); 1495 1496 __free_slab(page->slab, page); 1497} 1498 1499static void free_slab(struct kmem_cache *s, struct page *page) 1500{ 1501 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1502 struct rcu_head *head; 1503 1504 if (need_reserve_slab_rcu) { 1505 int order = compound_order(page); 1506 int offset = (PAGE_SIZE << order) - s->reserved; 1507 1508 VM_BUG_ON(s->reserved != sizeof(*head)); 1509 head = page_address(page) + offset; 1510 } else { 1511 /* 1512 * RCU free overloads the RCU head over the LRU 1513 */ 1514 head = (void *)&page->lru; 1515 } 1516 1517 call_rcu(head, rcu_free_slab); 1518 } else 1519 __free_slab(s, page); 1520} 1521 1522static void discard_slab(struct kmem_cache *s, struct page *page) 1523{ 1524 dec_slabs_node(s, page_to_nid(page), page->objects); 1525 free_slab(s, page); 1526} 1527 1528/* 1529 * Management of partially allocated slabs. 1530 * 1531 * list_lock must be held. 1532 */ 1533static inline void add_partial(struct kmem_cache_node *n, 1534 struct page *page, int tail) 1535{ 1536 n->nr_partial++; 1537 if (tail) 1538 list_add_tail(&page->lru, &n->partial); 1539 else 1540 list_add(&page->lru, &n->partial); 1541} 1542 1543/* 1544 * list_lock must be held. 1545 */ 1546static inline void remove_partial(struct kmem_cache_node *n, 1547 struct page *page) 1548{ 1549 list_del(&page->lru); 1550 n->nr_partial--; 1551} 1552 1553/* 1554 * Lock slab, remove from the partial list and put the object into the 1555 * per cpu freelist. 1556 * 1557 * Returns a list of objects or NULL if it fails. 1558 * 1559 * Must hold list_lock. 1560 */ 1561static inline void *acquire_slab(struct kmem_cache *s, 1562 struct kmem_cache_node *n, struct page *page, 1563 int mode) 1564{ 1565 void *freelist; 1566 unsigned long counters; 1567 struct page new; 1568 1569 /* 1570 * Zap the freelist and set the frozen bit. 1571 * The old freelist is the list of objects for the 1572 * per cpu allocation list. 1573 */ 1574 do { 1575 freelist = page->freelist; 1576 counters = page->counters; 1577 new.counters = counters; 1578 if (mode) 1579 new.inuse = page->objects; 1580 1581 VM_BUG_ON(new.frozen); 1582 new.frozen = 1; 1583 1584 } while (!__cmpxchg_double_slab(s, page, 1585 freelist, counters, 1586 NULL, new.counters, 1587 "lock and freeze")); 1588 1589 remove_partial(n, page); 1590 return freelist; 1591} 1592 1593static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); 1594 1595/* 1596 * Try to allocate a partial slab from a specific node. 1597 */ 1598static void *get_partial_node(struct kmem_cache *s, 1599 struct kmem_cache_node *n, struct kmem_cache_cpu *c) 1600{ 1601 struct page *page, *page2; 1602 void *object = NULL; 1603 1604 /* 1605 * Racy check. If we mistakenly see no partial slabs then we 1606 * just allocate an empty slab. If we mistakenly try to get a 1607 * partial slab and there is none available then get_partials() 1608 * will return NULL. 
1609 */ 1610 if (!n || !n->nr_partial) 1611 return NULL; 1612 1613 spin_lock(&n->list_lock); 1614 list_for_each_entry_safe(page, page2, &n->partial, lru) { 1615 void *t = acquire_slab(s, n, page, object == NULL); 1616 int available; 1617 1618 if (!t) 1619 break; 1620 1621 if (!object) { 1622 c->page = page; 1623 c->node = page_to_nid(page); 1624 stat(s, ALLOC_FROM_PARTIAL); 1625 object = t; 1626 available = page->objects - page->inuse; 1627 } else { 1628 page->freelist = t; 1629 available = put_cpu_partial(s, page, 0); 1630 } 1631 if (kmem_cache_debug(s) || available > s->cpu_partial / 2) 1632 break; 1633 1634 } 1635 spin_unlock(&n->list_lock); 1636 return object; 1637} 1638 1639/* 1640 * Get a page from somewhere. Search in increasing NUMA distances. 1641 */ 1642static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags, 1643 struct kmem_cache_cpu *c) 1644{ 1645#ifdef CONFIG_NUMA 1646 struct zonelist *zonelist; 1647 struct zoneref *z; 1648 struct zone *zone; 1649 enum zone_type high_zoneidx = gfp_zone(flags); 1650 void *object; 1651 1652 /* 1653 * The defrag ratio allows a configuration of the tradeoffs between 1654 * inter node defragmentation and node local allocations. A lower 1655 * defrag_ratio increases the tendency to do local allocations 1656 * instead of attempting to obtain partial slabs from other nodes. 1657 * 1658 * If the defrag_ratio is set to 0 then kmalloc() always 1659 * returns node local objects. If the ratio is higher then kmalloc() 1660 * may return off node objects because partial slabs are obtained 1661 * from other nodes and filled up. 1662 * 1663 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1664 * defrag_ratio = 1000) then every (well almost) allocation will 1665 * first attempt to defrag slab caches on other nodes. This means 1666 * scanning over all nodes to look for partial slabs which may be 1667 * expensive if we do it every time we are trying to find a slab 1668 * with available objects. 1669 */ 1670 if (!s->remote_node_defrag_ratio || 1671 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1672 return NULL; 1673 1674 get_mems_allowed(); 1675 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1676 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1677 struct kmem_cache_node *n; 1678 1679 n = get_node(s, zone_to_nid(zone)); 1680 1681 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1682 n->nr_partial > s->min_partial) { 1683 object = get_partial_node(s, n, c); 1684 if (object) { 1685 put_mems_allowed(); 1686 return object; 1687 } 1688 } 1689 } 1690 put_mems_allowed(); 1691#endif 1692 return NULL; 1693} 1694 1695/* 1696 * Get a partial page, lock it and return it. 1697 */ 1698static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, 1699 struct kmem_cache_cpu *c) 1700{ 1701 void *object; 1702 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node; 1703 1704 object = get_partial_node(s, get_node(s, searchnode), c); 1705 if (object || node != NUMA_NO_NODE) 1706 return object; 1707 1708 return get_any_partial(s, flags, c); 1709} 1710 1711#ifdef CONFIG_PREEMPT 1712/* 1713 * Calculate the next globally unique transaction for disambiguiation 1714 * during cmpxchg. The transactions start with the cpu number and are then 1715 * incremented by CONFIG_NR_CPUS. 1716 */ 1717#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 1718#else 1719/* 1720 * No preemption supported therefore also no need to check for 1721 * different cpus. 
1722 */ 1723#define TID_STEP 1 1724#endif 1725 1726static inline unsigned long next_tid(unsigned long tid) 1727{ 1728 return tid + TID_STEP; 1729} 1730 1731static inline unsigned int tid_to_cpu(unsigned long tid) 1732{ 1733 return tid % TID_STEP; 1734} 1735 1736static inline unsigned long tid_to_event(unsigned long tid) 1737{ 1738 return tid / TID_STEP; 1739} 1740 1741static inline unsigned int init_tid(int cpu) 1742{ 1743 return cpu; 1744} 1745 1746static inline void note_cmpxchg_failure(const char *n, 1747 const struct kmem_cache *s, unsigned long tid) 1748{ 1749#ifdef SLUB_DEBUG_CMPXCHG 1750 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 1751 1752 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name); 1753 1754#ifdef CONFIG_PREEMPT 1755 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 1756 printk("due to cpu change %d -> %d\n", 1757 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 1758 else 1759#endif 1760 if (tid_to_event(tid) != tid_to_event(actual_tid)) 1761 printk("due to cpu running other code. Event %ld->%ld\n", 1762 tid_to_event(tid), tid_to_event(actual_tid)); 1763 else 1764 printk("for unknown reason: actual=%lx was=%lx target=%lx\n", 1765 actual_tid, tid, next_tid(tid)); 1766#endif 1767 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 1768} 1769 1770void init_kmem_cache_cpus(struct kmem_cache *s) 1771{ 1772 int cpu; 1773 1774 for_each_possible_cpu(cpu) 1775 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); 1776} 1777 1778/* 1779 * Remove the cpu slab 1780 */ 1781static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1782{ 1783 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; 1784 struct page *page = c->page; 1785 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1786 int lock = 0; 1787 enum slab_modes l = M_NONE, m = M_NONE; 1788 void *freelist; 1789 void *nextfree; 1790 int tail = 0; 1791 struct page new; 1792 struct page old; 1793 1794 if (page->freelist) { 1795 stat(s, DEACTIVATE_REMOTE_FREES); 1796 tail = 1; 1797 } 1798 1799 c->tid = next_tid(c->tid); 1800 c->page = NULL; 1801 freelist = c->freelist; 1802 c->freelist = NULL; 1803 1804 /* 1805 * Stage one: Free all available per cpu objects back 1806 * to the page freelist while it is still frozen. Leave the 1807 * last one. 1808 * 1809 * There is no need to take the list->lock because the page 1810 * is still frozen. 1811 */ 1812 while (freelist && (nextfree = get_freepointer(s, freelist))) { 1813 void *prior; 1814 unsigned long counters; 1815 1816 do { 1817 prior = page->freelist; 1818 counters = page->counters; 1819 set_freepointer(s, freelist, prior); 1820 new.counters = counters; 1821 new.inuse--; 1822 VM_BUG_ON(!new.frozen); 1823 1824 } while (!__cmpxchg_double_slab(s, page, 1825 prior, counters, 1826 freelist, new.counters, 1827 "drain percpu freelist")); 1828 1829 freelist = nextfree; 1830 } 1831 1832 /* 1833 * Stage two: Ensure that the page is unfrozen while the 1834 * list presence reflects the actual number of objects 1835 * during unfreeze. 1836 * 1837 * We setup the list membership and then perform a cmpxchg 1838 * with the count. If there is a mismatch then the page 1839 * is not unfrozen but the page is on the wrong list. 1840 * 1841 * Then we restart the process which may have to remove 1842 * the page from the list that we just put it on again 1843 * because the number of objects in the slab may have 1844 * changed. 
1845 */ 1846redo: 1847 1848 old.freelist = page->freelist; 1849 old.counters = page->counters; 1850 VM_BUG_ON(!old.frozen); 1851 1852 /* Determine target state of the slab */ 1853 new.counters = old.counters; 1854 if (freelist) { 1855 new.inuse--; 1856 set_freepointer(s, freelist, old.freelist); 1857 new.freelist = freelist; 1858 } else 1859 new.freelist = old.freelist; 1860 1861 new.frozen = 0; 1862 1863 if (!new.inuse && n->nr_partial > s->min_partial) 1864 m = M_FREE; 1865 else if (new.freelist) { 1866 m = M_PARTIAL; 1867 if (!lock) { 1868 lock = 1; 1869 /* 1870 * Taking the spinlock removes the possiblity 1871 * that acquire_slab() will see a slab page that 1872 * is frozen 1873 */ 1874 spin_lock(&n->list_lock); 1875 } 1876 } else { 1877 m = M_FULL; 1878 if (kmem_cache_debug(s) && !lock) { 1879 lock = 1; 1880 /* 1881 * This also ensures that the scanning of full 1882 * slabs from diagnostic functions will not see 1883 * any frozen slabs. 1884 */ 1885 spin_lock(&n->list_lock); 1886 } 1887 } 1888 1889 if (l != m) { 1890 1891 if (l == M_PARTIAL) 1892 1893 remove_partial(n, page); 1894 1895 else if (l == M_FULL) 1896 1897 remove_full(s, page); 1898 1899 if (m == M_PARTIAL) { 1900 1901 add_partial(n, page, tail); 1902 stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1903 1904 } else if (m == M_FULL) { 1905 1906 stat(s, DEACTIVATE_FULL); 1907 add_full(s, n, page); 1908 1909 } 1910 } 1911 1912 l = m; 1913 if (!__cmpxchg_double_slab(s, page, 1914 old.freelist, old.counters, 1915 new.freelist, new.counters, 1916 "unfreezing slab")) 1917 goto redo; 1918 1919 if (lock) 1920 spin_unlock(&n->list_lock); 1921 1922 if (m == M_FREE) { 1923 stat(s, DEACTIVATE_EMPTY); 1924 discard_slab(s, page); 1925 stat(s, FREE_SLAB); 1926 } 1927} 1928 1929/* Unfreeze all the cpu partial slabs */ 1930static void unfreeze_partials(struct kmem_cache *s) 1931{ 1932 struct kmem_cache_node *n = NULL; 1933 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); 1934 struct page *page; 1935 1936 while ((page = c->partial)) { 1937 enum slab_modes { M_PARTIAL, M_FREE }; 1938 enum slab_modes l, m; 1939 struct page new; 1940 struct page old; 1941 1942 c->partial = page->next; 1943 l = M_FREE; 1944 1945 do { 1946 1947 old.freelist = page->freelist; 1948 old.counters = page->counters; 1949 VM_BUG_ON(!old.frozen); 1950 1951 new.counters = old.counters; 1952 new.freelist = old.freelist; 1953 1954 new.frozen = 0; 1955 1956 if (!new.inuse && (!n || n->nr_partial > s->min_partial)) 1957 m = M_FREE; 1958 else { 1959 struct kmem_cache_node *n2 = get_node(s, 1960 page_to_nid(page)); 1961 1962 m = M_PARTIAL; 1963 if (n != n2) { 1964 if (n) 1965 spin_unlock(&n->list_lock); 1966 1967 n = n2; 1968 spin_lock(&n->list_lock); 1969 } 1970 } 1971 1972 if (l != m) { 1973 if (l == M_PARTIAL) 1974 remove_partial(n, page); 1975 else 1976 add_partial(n, page, 1); 1977 1978 l = m; 1979 } 1980 1981 } while (!cmpxchg_double_slab(s, page, 1982 old.freelist, old.counters, 1983 new.freelist, new.counters, 1984 "unfreezing slab")); 1985 1986 if (m == M_FREE) { 1987 stat(s, DEACTIVATE_EMPTY); 1988 discard_slab(s, page); 1989 stat(s, FREE_SLAB); 1990 } 1991 } 1992 1993 if (n) 1994 spin_unlock(&n->list_lock); 1995} 1996 1997/* 1998 * Put a page that was just frozen (in __slab_free) into a partial page 1999 * slot if available. This is done without interrupts disabled and without 2000 * preemption disabled. The cmpxchg is racy and may put the partial page 2001 * onto a random cpus partial slot. 
2002 * 2003 * If we did not find a slot then simply move all the partials to the 2004 * per node partial list. 2005 */ 2006int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 2007{ 2008 struct page *oldpage; 2009 int pages; 2010 int pobjects; 2011 2012 do { 2013 pages = 0; 2014 pobjects = 0; 2015 oldpage = this_cpu_read(s->cpu_slab->partial); 2016 2017 if (oldpage) { 2018 pobjects = oldpage->pobjects; 2019 pages = oldpage->pages; 2020 if (drain && pobjects > s->cpu_partial) { 2021 unsigned long flags; 2022 /* 2023 * partial array is full. Move the existing 2024 * set to the per node partial list. 2025 */ 2026 local_irq_save(flags); 2027 unfreeze_partials(s); 2028 local_irq_restore(flags); 2029 pobjects = 0; 2030 pages = 0; 2031 } 2032 } 2033 2034 pages++; 2035 pobjects += page->objects - page->inuse; 2036 2037 page->pages = pages; 2038 page->pobjects = pobjects; 2039 page->next = oldpage; 2040 2041 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); 2042 stat(s, CPU_PARTIAL_FREE); 2043 return pobjects; 2044} 2045 2046static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2047{ 2048 stat(s, CPUSLAB_FLUSH); 2049 deactivate_slab(s, c); 2050} 2051 2052/* 2053 * Flush cpu slab. 2054 * 2055 * Called from IPI handler with interrupts disabled. 2056 */ 2057static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2058{ 2059 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2060 2061 if (likely(c)) { 2062 if (c->page) 2063 flush_slab(s, c); 2064 2065 unfreeze_partials(s); 2066 } 2067} 2068 2069static void flush_cpu_slab(void *d) 2070{ 2071 struct kmem_cache *s = d; 2072 2073 __flush_cpu_slab(s, smp_processor_id()); 2074} 2075 2076static void flush_all(struct kmem_cache *s) 2077{ 2078 on_each_cpu(flush_cpu_slab, s, 1); 2079} 2080 2081/* 2082 * Check if the objects in a per cpu structure fit numa 2083 * locality expectations. 
2084 */ 2085static inline int node_match(struct kmem_cache_cpu *c, int node) 2086{ 2087#ifdef CONFIG_NUMA 2088 if (node != NUMA_NO_NODE && c->node != node) 2089 return 0; 2090#endif 2091 return 1; 2092} 2093 2094static int count_free(struct page *page) 2095{ 2096 return page->objects - page->inuse; 2097} 2098 2099static unsigned long count_partial(struct kmem_cache_node *n, 2100 int (*get_count)(struct page *)) 2101{ 2102 unsigned long flags; 2103 unsigned long x = 0; 2104 struct page *page; 2105 2106 spin_lock_irqsave(&n->list_lock, flags); 2107 list_for_each_entry(page, &n->partial, lru) 2108 x += get_count(page); 2109 spin_unlock_irqrestore(&n->list_lock, flags); 2110 return x; 2111} 2112 2113static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2114{ 2115#ifdef CONFIG_SLUB_DEBUG 2116 return atomic_long_read(&n->total_objects); 2117#else 2118 return 0; 2119#endif 2120} 2121 2122static noinline void 2123slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2124{ 2125 int node; 2126 2127 printk(KERN_WARNING 2128 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n", 2129 nid, gfpflags); 2130 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, " 2131 "default order: %d, min order: %d\n", s->name, s->objsize, 2132 s->size, oo_order(s->oo), oo_order(s->min)); 2133 2134 if (oo_order(s->min) > get_order(s->objsize)) 2135 printk(KERN_WARNING " %s debugging increased min order, use " 2136 "slub_debug=O to disable.\n", s->name); 2137 2138 for_each_online_node(node) { 2139 struct kmem_cache_node *n = get_node(s, node); 2140 unsigned long nr_slabs; 2141 unsigned long nr_objs; 2142 unsigned long nr_free; 2143 2144 if (!n) 2145 continue; 2146 2147 nr_free = count_partial(n, count_free); 2148 nr_slabs = node_nr_slabs(n); 2149 nr_objs = node_nr_objs(n); 2150 2151 printk(KERN_WARNING 2152 " node %d: slabs: %ld, objs: %ld, free: %ld\n", 2153 node, nr_slabs, nr_objs, nr_free); 2154 } 2155} 2156 2157static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, 2158 int node, struct kmem_cache_cpu **pc) 2159{ 2160 void *object; 2161 struct kmem_cache_cpu *c; 2162 struct page *page = new_slab(s, flags, node); 2163 2164 if (page) { 2165 c = __this_cpu_ptr(s->cpu_slab); 2166 if (c->page) 2167 flush_slab(s, c); 2168 2169 /* 2170 * No other reference to the page yet so we can 2171 * muck around with it freely without cmpxchg 2172 */ 2173 object = page->freelist; 2174 page->freelist = NULL; 2175 2176 stat(s, ALLOC_SLAB); 2177 c->node = page_to_nid(page); 2178 c->page = page; 2179 *pc = c; 2180 } else 2181 object = NULL; 2182 2183 return object; 2184} 2185 2186/* 2187 * Slow path. The lockless freelist is empty or we need to perform 2188 * debugging duties. 2189 * 2190 * Processing is still very fast if new objects have been freed to the 2191 * regular freelist. In that case we simply take over the regular freelist 2192 * as the lockless freelist and zap the regular freelist. 2193 * 2194 * If that is not working then we fall back to the partial lists. We take the 2195 * first element of the freelist as the object to allocate now and move the 2196 * rest of the freelist to the lockless freelist. 2197 * 2198 * And if we were unable to get a new slab from the partial slab lists then 2199 * we need to allocate a new slab. This is the slowest path since it involves 2200 * a call to the page allocator and the setup of a new slab. 
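 *
 * In short, the order of fallbacks below is: take over the cpu slab's regular
 * freelist, then a slab from the per cpu partial list (c->partial), then
 * get_partial() from the node partial lists, and finally new_slab_objects(),
 * which asks the page allocator for a fresh slab.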
2201 */ 2202static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2203 unsigned long addr, struct kmem_cache_cpu *c) 2204{ 2205 void **object; 2206 unsigned long flags; 2207 struct page new; 2208 unsigned long counters; 2209 2210 local_irq_save(flags); 2211#ifdef CONFIG_PREEMPT 2212 /* 2213 * We may have been preempted and rescheduled on a different 2214 * cpu before disabling interrupts. Need to reload cpu area 2215 * pointer. 2216 */ 2217 c = this_cpu_ptr(s->cpu_slab); 2218#endif 2219 2220 if (!c->page) 2221 goto new_slab; 2222redo: 2223 if (unlikely(!node_match(c, node))) { 2224 stat(s, ALLOC_NODE_MISMATCH); 2225 deactivate_slab(s, c); 2226 goto new_slab; 2227 } 2228 2229 stat(s, ALLOC_SLOWPATH); 2230 2231 do { 2232 object = c->page->freelist; 2233 counters = c->page->counters; 2234 new.counters = counters; 2235 VM_BUG_ON(!new.frozen); 2236 2237 /* 2238 * If there is no object left then we use this loop to 2239 * deactivate the slab which is simple since no objects 2240 * are left in the slab and therefore we do not need to 2241 * put the page back onto the partial list. 2242 * 2243 * If there are objects left then we retrieve them 2244 * and use them to refill the per cpu queue. 2245 */ 2246 2247 new.inuse = c->page->objects; 2248 new.frozen = object != NULL; 2249 2250 } while (!__cmpxchg_double_slab(s, c->page, 2251 object, counters, 2252 NULL, new.counters, 2253 "__slab_alloc")); 2254 2255 if (!object) { 2256 c->page = NULL; 2257 stat(s, DEACTIVATE_BYPASS); 2258 goto new_slab; 2259 } 2260 2261 stat(s, ALLOC_REFILL); 2262 2263load_freelist: 2264 c->freelist = get_freepointer(s, object); 2265 c->tid = next_tid(c->tid); 2266 local_irq_restore(flags); 2267 return object; 2268 2269new_slab: 2270 2271 if (c->partial) { 2272 c->page = c->partial; 2273 c->partial = c->page->next; 2274 c->node = page_to_nid(c->page); 2275 stat(s, CPU_PARTIAL_ALLOC); 2276 c->freelist = NULL; 2277 goto redo; 2278 } 2279 2280 /* Then do expensive stuff like retrieving pages from the partial lists */ 2281 object = get_partial(s, gfpflags, node, c); 2282 2283 if (unlikely(!object)) { 2284 2285 object = new_slab_objects(s, gfpflags, node, &c); 2286 2287 if (unlikely(!object)) { 2288 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) 2289 slab_out_of_memory(s, gfpflags, node); 2290 2291 local_irq_restore(flags); 2292 return NULL; 2293 } 2294 } 2295 2296 if (likely(!kmem_cache_debug(s))) 2297 goto load_freelist; 2298 2299 /* Only entered in the debug case */ 2300 if (!alloc_debug_processing(s, c->page, object, addr)) 2301 goto new_slab; /* Slab failed checks. Next slab needed */ 2302 2303 c->freelist = get_freepointer(s, object); 2304 deactivate_slab(s, c); 2305 c->node = NUMA_NO_NODE; 2306 local_irq_restore(flags); 2307 return object; 2308} 2309 2310/* 2311 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2312 * have the fastpath folded into their functions. So no function call 2313 * overhead for requests that can be satisfied on the fastpath. 2314 * 2315 * The fastpath works by first checking if the lockless freelist can be used. 2316 * If not then __slab_alloc is called for slow processing. 2317 * 2318 * Otherwise we can simply pick the next object from the lockless free list. 
2319 */ 2320static __always_inline void *slab_alloc(struct kmem_cache *s, 2321 gfp_t gfpflags, int node, unsigned long addr) 2322{ 2323 void **object; 2324 struct kmem_cache_cpu *c; 2325 unsigned long tid; 2326 2327 if (slab_pre_alloc_hook(s, gfpflags)) 2328 return NULL; 2329 2330redo: 2331 2332 /* 2333 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 2334 * enabled. We may switch back and forth between cpus while 2335 * reading from one cpu area. That does not matter as long 2336 * as we end up on the original cpu again when doing the cmpxchg. 2337 */ 2338 c = __this_cpu_ptr(s->cpu_slab); 2339 2340 /* 2341 * The transaction ids are globally unique per cpu and per operation on 2342 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double 2343 * occurs on the right processor and that there was no operation on the 2344 * linked list in between. 2345 */ 2346 tid = c->tid; 2347 barrier(); 2348 2349 object = c->freelist; 2350 if (unlikely(!object || !node_match(c, node))) 2351 2352 object = __slab_alloc(s, gfpflags, node, addr, c); 2353 2354 else { 2355 /* 2356 * The cmpxchg will only match if there was no additional 2357 * operation and if we are on the right processor. 2358 * 2359 * The cmpxchg does the following atomically (without lock semantics!) 2360 * 1. Relocate first pointer to the current per cpu area. 2361 * 2. Verify that tid and freelist have not been changed 2362 * 3. If they were not changed replace tid and freelist 2363 * 2364 * Since this is without lock semantics the protection is only against 2365 * code executing on this cpu *not* from access by other cpus. 2366 */ 2367 if (unlikely(!irqsafe_cpu_cmpxchg_double( 2368 s->cpu_slab->freelist, s->cpu_slab->tid, 2369 object, tid, 2370 get_freepointer_safe(s, object), next_tid(tid)))) { 2371 2372 note_cmpxchg_failure("slab_alloc", s, tid); 2373 goto redo; 2374 } 2375 stat(s, ALLOC_FASTPATH); 2376 } 2377 2378 if (unlikely(gfpflags & __GFP_ZERO) && object) 2379 memset(object, 0, s->objsize); 2380 2381 slab_post_alloc_hook(s, gfpflags, object); 2382 2383 return object; 2384} 2385 2386void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 2387{ 2388 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); 2389 2390 trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags); 2391 2392 return ret; 2393} 2394EXPORT_SYMBOL(kmem_cache_alloc); 2395 2396#ifdef CONFIG_TRACING 2397void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 2398{ 2399 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); 2400 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 2401 return ret; 2402} 2403EXPORT_SYMBOL(kmem_cache_alloc_trace); 2404 2405void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) 2406{ 2407 void *ret = kmalloc_order(size, flags, order); 2408 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); 2409 return ret; 2410} 2411EXPORT_SYMBOL(kmalloc_order_trace); 2412#endif 2413 2414#ifdef CONFIG_NUMA 2415void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 2416{ 2417 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); 2418 2419 trace_kmem_cache_alloc_node(_RET_IP_, ret, 2420 s->objsize, s->size, gfpflags, node); 2421 2422 return ret; 2423} 2424EXPORT_SYMBOL(kmem_cache_alloc_node); 2425 2426#ifdef CONFIG_TRACING 2427void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 2428 gfp_t gfpflags, 2429 int node, size_t size) 2430{ 2431 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); 2432 2433 
trace_kmalloc_node(_RET_IP_, ret, 2434 size, s->size, gfpflags, node); 2435 return ret; 2436} 2437EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 2438#endif 2439#endif 2440 2441/* 2442 * Slow patch handling. This may still be called frequently since objects 2443 * have a longer lifetime than the cpu slabs in most processing loads. 2444 * 2445 * So we still attempt to reduce cache line usage. Just take the slab 2446 * lock and free the item. If there is no additional partial page 2447 * handling required then we can return immediately. 2448 */ 2449static void __slab_free(struct kmem_cache *s, struct page *page, 2450 void *x, unsigned long addr) 2451{ 2452 void *prior; 2453 void **object = (void *)x; 2454 int was_frozen; 2455 int inuse; 2456 struct page new; 2457 unsigned long counters; 2458 struct kmem_cache_node *n = NULL; 2459 unsigned long uninitialized_var(flags); 2460 2461 stat(s, FREE_SLOWPATH); 2462 2463 if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) 2464 return; 2465 2466 do { 2467 prior = page->freelist; 2468 counters = page->counters; 2469 set_freepointer(s, object, prior); 2470 new.counters = counters; 2471 was_frozen = new.frozen; 2472 new.inuse--; 2473 if ((!new.inuse || !prior) && !was_frozen && !n) { 2474 2475 if (!kmem_cache_debug(s) && !prior) 2476 2477 /* 2478 * Slab was on no list before and will be partially empty 2479 * We can defer the list move and instead freeze it. 2480 */ 2481 new.frozen = 1; 2482 2483 else { /* Needs to be taken off a list */ 2484 2485 n = get_node(s, page_to_nid(page)); 2486 /* 2487 * Speculatively acquire the list_lock. 2488 * If the cmpxchg does not succeed then we may 2489 * drop the list_lock without any processing. 2490 * 2491 * Otherwise the list_lock will synchronize with 2492 * other processors updating the list of slabs. 2493 */ 2494 spin_lock_irqsave(&n->list_lock, flags); 2495 2496 } 2497 } 2498 inuse = new.inuse; 2499 2500 } while (!cmpxchg_double_slab(s, page, 2501 prior, counters, 2502 object, new.counters, 2503 "__slab_free")); 2504 2505 if (likely(!n)) { 2506 2507 /* 2508 * If we just froze the page then put it onto the 2509 * per cpu partial list. 2510 */ 2511 if (new.frozen && !was_frozen) 2512 put_cpu_partial(s, page, 1); 2513 2514 /* 2515 * The list lock was not taken therefore no list 2516 * activity can be necessary. 2517 */ 2518 if (was_frozen) 2519 stat(s, FREE_FROZEN); 2520 return; 2521 } 2522 2523 /* 2524 * was_frozen may have been set after we acquired the list_lock in 2525 * an earlier loop. So we need to check it here again. 2526 */ 2527 if (was_frozen) 2528 stat(s, FREE_FROZEN); 2529 else { 2530 if (unlikely(!inuse && n->nr_partial > s->min_partial)) 2531 goto slab_empty; 2532 2533 /* 2534 * Objects left in the slab. If it was not on the partial list before 2535 * then add it. 2536 */ 2537 if (unlikely(!prior)) { 2538 remove_full(s, page); 2539 add_partial(n, page, 0); 2540 stat(s, FREE_ADD_PARTIAL); 2541 } 2542 } 2543 spin_unlock_irqrestore(&n->list_lock, flags); 2544 return; 2545 2546slab_empty: 2547 if (prior) { 2548 /* 2549 * Slab on the partial list. 2550 */ 2551 remove_partial(n, page); 2552 stat(s, FREE_REMOVE_PARTIAL); 2553 } else 2554 /* Slab must be on the full list */ 2555 remove_full(s, page); 2556 2557 spin_unlock_irqrestore(&n->list_lock, flags); 2558 stat(s, FREE_SLAB); 2559 discard_slab(s, page); 2560} 2561 2562/* 2563 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 2564 * can perform fastpath freeing without additional function calls. 
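 * (Concretely, the test below is just "page == c->page": an object whose slab
 * page is no longer this cpu's active slab, for example one freed long after
 * it was allocated or freed from a different cpu, takes __slab_free() instead.)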
2565 *
2566 * The fastpath is only possible if we are freeing to the current cpu slab
2567 * of this processor. This is typically the case if we have just allocated
2568 * the item before.
2569 *
2570 * If fastpath is not possible then fall back to __slab_free where we deal
2571 * with all sorts of special processing.
2572 */
2573static __always_inline void slab_free(struct kmem_cache *s,
2574 struct page *page, void *x, unsigned long addr)
2575{
2576 void **object = (void *)x;
2577 struct kmem_cache_cpu *c;
2578 unsigned long tid;
2579
2580 slab_free_hook(s, x);
2581
2582redo:
2583 /*
2584 * Determine the current cpu's per cpu slab.
2585 * The cpu may change afterward. However that does not matter since
2586 * data is retrieved via this pointer. If we are on the same cpu
2587 * during the cmpxchg then the free will succeed.
2588 */
2589 c = __this_cpu_ptr(s->cpu_slab);
2590
2591 tid = c->tid;
2592 barrier();
2593
2594 if (likely(page == c->page)) {
2595 set_freepointer(s, object, c->freelist);
2596
2597 if (unlikely(!irqsafe_cpu_cmpxchg_double(
2598 s->cpu_slab->freelist, s->cpu_slab->tid,
2599 c->freelist, tid,
2600 object, next_tid(tid)))) {
2601
2602 note_cmpxchg_failure("slab_free", s, tid);
2603 goto redo;
2604 }
2605 stat(s, FREE_FASTPATH);
2606 } else
2607 __slab_free(s, page, x, addr);
2608
2609}
2610
2611void kmem_cache_free(struct kmem_cache *s, void *x)
2612{
2613 struct page *page;
2614
2615 page = virt_to_head_page(x);
2616
2617 slab_free(s, page, x, _RET_IP_);
2618
2619 trace_kmem_cache_free(_RET_IP_, x);
2620}
2621EXPORT_SYMBOL(kmem_cache_free);
2622
2623/*
2624 * Object placement in a slab is made very easy because we always start at
2625 * offset 0. If we tune the size of the object to the alignment then we can
2626 * get the required alignment by putting one properly sized object after
2627 * another.
2628 *
2629 * Notice that the allocation order determines the sizes of the per cpu
2630 * caches. Each processor always has one slab available for allocations.
2631 * Increasing the allocation order reduces the number of times that slabs
2632 * must be moved on and off the partial lists and is therefore a factor in
2633 * locking overhead.
2634 */
2635
2636/*
2637 * Minimum / Maximum order of slab pages. This influences locking overhead
2638 * and slab fragmentation. A higher order reduces the number of partial slabs
2639 * and increases the number of allocations possible without having to
2640 * take the list_lock.
2641 */
2642static int slub_min_order;
2643static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2644static int slub_min_objects;
2645
2646/*
2647 * Merge control. If this is set then no merging of slab caches will occur.
2648 * (Could be removed. This was introduced to pacify the merge skeptics.)
2649 */
2650static int slub_nomerge;
2651
2652/*
2653 * Calculate the order of allocation given a slab object size.
2654 *
2655 * The order of allocation has significant impact on performance and other
2656 * system components. Generally order 0 allocations should be preferred since
2657 * order 0 does not cause fragmentation in the page allocator. Larger objects
2658 * can be problematic to put into order 0 slabs because there may be too much
2659 * unused space left. We go to a higher order if more than 1/16th of the slab
2660 * would be wasted.
2661 *
2662 * In order to reach satisfactory performance we must ensure that a minimum
2663 * number of objects is in one slab.
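 * (Illustrative arithmetic for the 1/16th rule above, assuming 4 KiB pages
 * and ignoring "reserved" and the minimum object count: a 700 byte object in
 * an order 0 slab wastes 4096 - 5 * 700 = 596 bytes, more than 4096 / 16 =
 * 256, so a higher order is tried; at order 1 the waste is 8192 - 11 * 700 =
 * 492 bytes, below 8192 / 16 = 512, so order 1 would be accepted.)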
Otherwise we may generate too much 2664 * activity on the partial lists which requires taking the list_lock. This is 2665 * less a concern for large slabs though which are rarely used. 2666 * 2667 * slub_max_order specifies the order where we begin to stop considering the 2668 * number of objects in a slab as critical. If we reach slub_max_order then 2669 * we try to keep the page order as low as possible. So we accept more waste 2670 * of space in favor of a small page order. 2671 * 2672 * Higher order allocations also allow the placement of more objects in a 2673 * slab and thereby reduce object handling overhead. If the user has 2674 * requested a higher mininum order then we start with that one instead of 2675 * the smallest order which will fit the object. 2676 */ 2677static inline int slab_order(int size, int min_objects, 2678 int max_order, int fract_leftover, int reserved) 2679{ 2680 int order; 2681 int rem; 2682 int min_order = slub_min_order; 2683 2684 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE) 2685 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 2686 2687 for (order = max(min_order, 2688 fls(min_objects * size - 1) - PAGE_SHIFT); 2689 order <= max_order; order++) { 2690 2691 unsigned long slab_size = PAGE_SIZE << order; 2692 2693 if (slab_size < min_objects * size + reserved) 2694 continue; 2695 2696 rem = (slab_size - reserved) % size; 2697 2698 if (rem <= slab_size / fract_leftover) 2699 break; 2700 2701 } 2702 2703 return order; 2704} 2705 2706static inline int calculate_order(int size, int reserved) 2707{ 2708 int order; 2709 int min_objects; 2710 int fraction; 2711 int max_objects; 2712 2713 /* 2714 * Attempt to find best configuration for a slab. This 2715 * works by first attempting to generate a layout with 2716 * the best configuration and backing off gradually. 2717 * 2718 * First we reduce the acceptable waste in a slab. Then 2719 * we reduce the minimum objects required in a slab. 2720 */ 2721 min_objects = slub_min_objects; 2722 if (!min_objects) 2723 min_objects = 4 * (fls(nr_cpu_ids) + 1); 2724 max_objects = order_objects(slub_max_order, size, reserved); 2725 min_objects = min(min_objects, max_objects); 2726 2727 while (min_objects > 1) { 2728 fraction = 16; 2729 while (fraction >= 4) { 2730 order = slab_order(size, min_objects, 2731 slub_max_order, fraction, reserved); 2732 if (order <= slub_max_order) 2733 return order; 2734 fraction /= 2; 2735 } 2736 min_objects--; 2737 } 2738 2739 /* 2740 * We were unable to place multiple objects in a slab. Now 2741 * lets see if we can place a single object there. 2742 */ 2743 order = slab_order(size, 1, slub_max_order, 1, reserved); 2744 if (order <= slub_max_order) 2745 return order; 2746 2747 /* 2748 * Doh this slab cannot be placed using slub_max_order. 2749 */ 2750 order = slab_order(size, 1, MAX_ORDER, 1, reserved); 2751 if (order < MAX_ORDER) 2752 return order; 2753 return -ENOSYS; 2754} 2755 2756/* 2757 * Figure out what the alignment of the objects will be. 2758 */ 2759static unsigned long calculate_alignment(unsigned long flags, 2760 unsigned long align, unsigned long size) 2761{ 2762 /* 2763 * If the user wants hardware cache aligned objects then follow that 2764 * suggestion if the object is sufficiently large. 2765 * 2766 * The hardware cache alignment cannot override the specified 2767 * alignment though. If that is greater then use it. 
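 *
 * Illustrative example, assuming a 64 byte cache line: for a 24 byte object
 * with SLAB_HWCACHE_ALIGN the loop below halves ralign from 64 to 32 (since
 * 24 <= 32 but 24 > 16), so align becomes at least 32; a 100 byte object
 * keeps the full 64 byte cache line alignment.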
2768 */ 2769 if (flags & SLAB_HWCACHE_ALIGN) { 2770 unsigned long ralign = cache_line_size(); 2771 while (size <= ralign / 2) 2772 ralign /= 2; 2773 align = max(align, ralign); 2774 } 2775 2776 if (align < ARCH_SLAB_MINALIGN) 2777 align = ARCH_SLAB_MINALIGN; 2778 2779 return ALIGN(align, sizeof(void *)); 2780} 2781 2782static void 2783init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 2784{ 2785 n->nr_partial = 0; 2786 spin_lock_init(&n->list_lock); 2787 INIT_LIST_HEAD(&n->partial); 2788#ifdef CONFIG_SLUB_DEBUG 2789 atomic_long_set(&n->nr_slabs, 0); 2790 atomic_long_set(&n->total_objects, 0); 2791 INIT_LIST_HEAD(&n->full); 2792#endif 2793} 2794 2795static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 2796{ 2797 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2798 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2799 2800 /* 2801 * Must align to double word boundary for the double cmpxchg 2802 * instructions to work; see __pcpu_double_call_return_bool(). 2803 */ 2804 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2805 2 * sizeof(void *)); 2806 2807 if (!s->cpu_slab) 2808 return 0; 2809 2810 init_kmem_cache_cpus(s); 2811 2812 return 1; 2813} 2814 2815static struct kmem_cache *kmem_cache_node; 2816 2817/* 2818 * No kmalloc_node yet so do it by hand. We know that this is the first 2819 * slab on the node for this slabcache. There are no concurrent accesses 2820 * possible. 2821 * 2822 * Note that this function only works on the kmalloc_node_cache 2823 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2824 * memory on a fresh node that has no slab structures yet. 2825 */ 2826static void early_kmem_cache_node_alloc(int node) 2827{ 2828 struct page *page; 2829 struct kmem_cache_node *n; 2830 2831 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 2832 2833 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 2834 2835 BUG_ON(!page); 2836 if (page_to_nid(page) != node) { 2837 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2838 "node %d\n", node); 2839 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2840 "in order to be able to continue\n"); 2841 } 2842 2843 n = page->freelist; 2844 BUG_ON(!n); 2845 page->freelist = get_freepointer(kmem_cache_node, n); 2846 page->inuse = 1; 2847 page->frozen = 0; 2848 kmem_cache_node->node[node] = n; 2849#ifdef CONFIG_SLUB_DEBUG 2850 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 2851 init_tracking(kmem_cache_node, n); 2852#endif 2853 init_kmem_cache_node(n, kmem_cache_node); 2854 inc_slabs_node(kmem_cache_node, node, page->objects); 2855 2856 add_partial(n, page, 0); 2857} 2858 2859static void free_kmem_cache_nodes(struct kmem_cache *s) 2860{ 2861 int node; 2862 2863 for_each_node_state(node, N_NORMAL_MEMORY) { 2864 struct kmem_cache_node *n = s->node[node]; 2865 2866 if (n) 2867 kmem_cache_free(kmem_cache_node, n); 2868 2869 s->node[node] = NULL; 2870 } 2871} 2872 2873static int init_kmem_cache_nodes(struct kmem_cache *s) 2874{ 2875 int node; 2876 2877 for_each_node_state(node, N_NORMAL_MEMORY) { 2878 struct kmem_cache_node *n; 2879 2880 if (slab_state == DOWN) { 2881 early_kmem_cache_node_alloc(node); 2882 continue; 2883 } 2884 n = kmem_cache_alloc_node(kmem_cache_node, 2885 GFP_KERNEL, node); 2886 2887 if (!n) { 2888 free_kmem_cache_nodes(s); 2889 return 0; 2890 } 2891 2892 s->node[node] = n; 2893 init_kmem_cache_node(n, s); 2894 } 2895 return 1; 2896} 2897 2898static void set_min_partial(struct kmem_cache *s, unsigned long min) 2899{ 2900 if (min < 
MIN_PARTIAL) 2901 min = MIN_PARTIAL; 2902 else if (min > MAX_PARTIAL) 2903 min = MAX_PARTIAL; 2904 s->min_partial = min; 2905} 2906 2907/* 2908 * calculate_sizes() determines the order and the distribution of data within 2909 * a slab object. 2910 */ 2911static int calculate_sizes(struct kmem_cache *s, int forced_order) 2912{ 2913 unsigned long flags = s->flags; 2914 unsigned long size = s->objsize; 2915 unsigned long align = s->align; 2916 int order; 2917 2918 /* 2919 * Round up object size to the next word boundary. We can only 2920 * place the free pointer at word boundaries and this determines 2921 * the possible location of the free pointer. 2922 */ 2923 size = ALIGN(size, sizeof(void *)); 2924 2925#ifdef CONFIG_SLUB_DEBUG 2926 /* 2927 * Determine if we can poison the object itself. If the user of 2928 * the slab may touch the object after free or before allocation 2929 * then we should never poison the object itself. 2930 */ 2931 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2932 !s->ctor) 2933 s->flags |= __OBJECT_POISON; 2934 else 2935 s->flags &= ~__OBJECT_POISON; 2936 2937 2938 /* 2939 * If we are Redzoning then check if there is some space between the 2940 * end of the object and the free pointer. If not then add an 2941 * additional word to have some bytes to store Redzone information. 2942 */ 2943 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2944 size += sizeof(void *); 2945#endif 2946 2947 /* 2948 * With that we have determined the number of bytes in actual use 2949 * by the object. This is the potential offset to the free pointer. 2950 */ 2951 s->inuse = size; 2952 2953 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2954 s->ctor)) { 2955 /* 2956 * Relocate free pointer after the object if it is not 2957 * permitted to overwrite the first word of the object on 2958 * kmem_cache_free. 2959 * 2960 * This is the case if we do RCU, have a constructor or 2961 * destructor or are poisoning the objects. 2962 */ 2963 s->offset = size; 2964 size += sizeof(void *); 2965 } 2966 2967#ifdef CONFIG_SLUB_DEBUG 2968 if (flags & SLAB_STORE_USER) 2969 /* 2970 * Need to store information about allocs and frees after 2971 * the object. 2972 */ 2973 size += 2 * sizeof(struct track); 2974 2975 if (flags & SLAB_RED_ZONE) 2976 /* 2977 * Add some empty padding so that we can catch 2978 * overwrites from earlier objects rather than let 2979 * tracking information or the free pointer be 2980 * corrupted if a user writes before the start 2981 * of the object. 2982 */ 2983 size += sizeof(void *); 2984#endif 2985 2986 /* 2987 * Determine the alignment based on various parameters that the 2988 * user specified and the dynamic determination of cache line size 2989 * on bootup. 2990 */ 2991 align = calculate_alignment(flags, align, s->objsize); 2992 s->align = align; 2993 2994 /* 2995 * SLUB stores one object immediately after another beginning from 2996 * offset 0. In order to align the objects we have to simply size 2997 * each object to conform to the alignment. 
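 *
 * Worked example with illustrative numbers: a 52 byte object with an 8 byte
 * alignment is padded by the ALIGN() below to s->size = 56, so objects sit
 * at offsets 0, 56, 112, ... and every one starts on an 8 byte boundary.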
2998 */ 2999 size = ALIGN(size, align); 3000 s->size = size; 3001 if (forced_order >= 0) 3002 order = forced_order; 3003 else 3004 order = calculate_order(size, s->reserved); 3005 3006 if (order < 0) 3007 return 0; 3008 3009 s->allocflags = 0; 3010 if (order) 3011 s->allocflags |= __GFP_COMP; 3012 3013 if (s->flags & SLAB_CACHE_DMA) 3014 s->allocflags |= SLUB_DMA; 3015 3016 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3017 s->allocflags |= __GFP_RECLAIMABLE; 3018 3019 /* 3020 * Determine the number of objects per slab 3021 */ 3022 s->oo = oo_make(order, size, s->reserved); 3023 s->min = oo_make(get_order(size), size, s->reserved); 3024 if (oo_objects(s->oo) > oo_objects(s->max)) 3025 s->max = s->oo; 3026 3027 return !!oo_objects(s->oo); 3028 3029} 3030 3031static int kmem_cache_open(struct kmem_cache *s, 3032 const char *name, size_t size, 3033 size_t align, unsigned long flags, 3034 void (*ctor)(void *)) 3035{ 3036 memset(s, 0, kmem_size); 3037 s->name = name; 3038 s->ctor = ctor; 3039 s->objsize = size; 3040 s->align = align; 3041 s->flags = kmem_cache_flags(size, flags, name, ctor); 3042 s->reserved = 0; 3043 3044 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) 3045 s->reserved = sizeof(struct rcu_head); 3046 3047 if (!calculate_sizes(s, -1)) 3048 goto error; 3049 if (disable_higher_order_debug) { 3050 /* 3051 * Disable debugging flags that store metadata if the min slab 3052 * order increased. 3053 */ 3054 if (get_order(s->size) > get_order(s->objsize)) { 3055 s->flags &= ~DEBUG_METADATA_FLAGS; 3056 s->offset = 0; 3057 if (!calculate_sizes(s, -1)) 3058 goto error; 3059 } 3060 } 3061 3062#ifdef CONFIG_CMPXCHG_DOUBLE 3063 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0) 3064 /* Enable fast mode */ 3065 s->flags |= __CMPXCHG_DOUBLE; 3066#endif 3067 3068 /* 3069 * The larger the object size is, the more pages we want on the partial 3070 * list to avoid pounding the page allocator excessively. 3071 */ 3072 set_min_partial(s, ilog2(s->size) / 2); 3073 3074 /* 3075 * cpu_partial determined the maximum number of objects kept in the 3076 * per cpu partial lists of a processor. 3077 * 3078 * Per cpu partial lists mainly contain slabs that just have one 3079 * object freed. If they are used for allocation then they can be 3080 * filled up again with minimal effort. The slab will never hit the 3081 * per node partial lists and therefore no locking will be required. 3082 * 3083 * This setting also determines 3084 * 3085 * A) The number of objects from per cpu partial slabs dumped to the 3086 * per node list when we reach the limit. 3087 * B) The number of objects in cpu partial slabs to extract from the 3088 * per node list when we run out of per cpu objects. We only fetch 50% 3089 * to keep some capacity around for frees. 
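 *
 * Example with the values chosen below: a cache with 192 byte objects gets
 * cpu_partial = 30, so put_cpu_partial() starts moving the per cpu partial
 * slabs to the node list once they accumulate more than 30 free objects.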
3090 */ 3091 if (s->size >= PAGE_SIZE) 3092 s->cpu_partial = 2; 3093 else if (s->size >= 1024) 3094 s->cpu_partial = 6; 3095 else if (s->size >= 256) 3096 s->cpu_partial = 13; 3097 else 3098 s->cpu_partial = 30; 3099 3100 s->refcount = 1; 3101#ifdef CONFIG_NUMA 3102 s->remote_node_defrag_ratio = 1000; 3103#endif 3104 if (!init_kmem_cache_nodes(s)) 3105 goto error; 3106 3107 if (alloc_kmem_cache_cpus(s)) 3108 return 1; 3109 3110 free_kmem_cache_nodes(s); 3111error: 3112 if (flags & SLAB_PANIC) 3113 panic("Cannot create slab %s size=%lu realsize=%u " 3114 "order=%u offset=%u flags=%lx\n", 3115 s->name, (unsigned long)size, s->size, oo_order(s->oo), 3116 s->offset, flags); 3117 return 0; 3118} 3119 3120/* 3121 * Determine the size of a slab object 3122 */ 3123unsigned int kmem_cache_size(struct kmem_cache *s) 3124{ 3125 return s->objsize; 3126} 3127EXPORT_SYMBOL(kmem_cache_size); 3128 3129static void list_slab_objects(struct kmem_cache *s, struct page *page, 3130 const char *text) 3131{ 3132#ifdef CONFIG_SLUB_DEBUG 3133 void *addr = page_address(page); 3134 void *p; 3135 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * 3136 sizeof(long), GFP_ATOMIC); 3137 if (!map) 3138 return; 3139 slab_err(s, page, "%s", text); 3140 slab_lock(page); 3141 3142 get_map(s, page, map); 3143 for_each_object(p, s, addr, page->objects) { 3144 3145 if (!test_bit(slab_index(p, s, addr), map)) { 3146 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 3147 p, p - addr); 3148 print_tracking(s, p); 3149 } 3150 } 3151 slab_unlock(page); 3152 kfree(map); 3153#endif 3154} 3155 3156/* 3157 * Attempt to free all partial slabs on a node. 3158 * This is called from kmem_cache_close(). We must be the last thread 3159 * using the cache and therefore we do not need to lock anymore. 3160 */ 3161static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3162{ 3163 struct page *page, *h; 3164 3165 list_for_each_entry_safe(page, h, &n->partial, lru) { 3166 if (!page->inuse) { 3167 remove_partial(n, page); 3168 discard_slab(s, page); 3169 } else { 3170 list_slab_objects(s, page, 3171 "Objects remaining on kmem_cache_close()"); 3172 } 3173 } 3174} 3175 3176/* 3177 * Release all resources used by a slab cache. 
3178 */ 3179static inline int kmem_cache_close(struct kmem_cache *s) 3180{ 3181 int node; 3182 3183 flush_all(s); 3184 free_percpu(s->cpu_slab); 3185 /* Attempt to free all objects */ 3186 for_each_node_state(node, N_NORMAL_MEMORY) { 3187 struct kmem_cache_node *n = get_node(s, node); 3188 3189 free_partial(s, n); 3190 if (n->nr_partial || slabs_node(s, node)) 3191 return 1; 3192 } 3193 free_kmem_cache_nodes(s); 3194 return 0; 3195} 3196 3197/* 3198 * Close a cache and release the kmem_cache structure 3199 * (must be used for caches created using kmem_cache_create) 3200 */ 3201void kmem_cache_destroy(struct kmem_cache *s) 3202{ 3203 down_write(&slub_lock); 3204 s->refcount--; 3205 if (!s->refcount) { 3206 list_del(&s->list); 3207 up_write(&slub_lock); 3208 if (kmem_cache_close(s)) { 3209 printk(KERN_ERR "SLUB %s: %s called for cache that " 3210 "still has objects.\n", s->name, __func__); 3211 dump_stack(); 3212 } 3213 if (s->flags & SLAB_DESTROY_BY_RCU) 3214 rcu_barrier(); 3215 sysfs_slab_remove(s); 3216 } else 3217 up_write(&slub_lock); 3218} 3219EXPORT_SYMBOL(kmem_cache_destroy); 3220 3221/******************************************************************** 3222 * Kmalloc subsystem 3223 *******************************************************************/ 3224 3225struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; 3226EXPORT_SYMBOL(kmalloc_caches); 3227 3228static struct kmem_cache *kmem_cache; 3229 3230#ifdef CONFIG_ZONE_DMA 3231static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; 3232#endif 3233 3234static int __init setup_slub_min_order(char *str) 3235{ 3236 get_option(&str, &slub_min_order); 3237 3238 return 1; 3239} 3240 3241__setup("slub_min_order=", setup_slub_min_order); 3242 3243static int __init setup_slub_max_order(char *str) 3244{ 3245 get_option(&str, &slub_max_order); 3246 slub_max_order = min(slub_max_order, MAX_ORDER - 1); 3247 3248 return 1; 3249} 3250 3251__setup("slub_max_order=", setup_slub_max_order); 3252 3253static int __init setup_slub_min_objects(char *str) 3254{ 3255 get_option(&str, &slub_min_objects); 3256 3257 return 1; 3258} 3259 3260__setup("slub_min_objects=", setup_slub_min_objects); 3261 3262static int __init setup_slub_nomerge(char *str) 3263{ 3264 slub_nomerge = 1; 3265 return 1; 3266} 3267 3268__setup("slub_nomerge", setup_slub_nomerge); 3269 3270static struct kmem_cache *__init create_kmalloc_cache(const char *name, 3271 int size, unsigned int flags) 3272{ 3273 struct kmem_cache *s; 3274 3275 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3276 3277 /* 3278 * This function is called with IRQs disabled during early-boot on 3279 * single CPU so there's no need to take slub_lock here. 3280 */ 3281 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, 3282 flags, NULL)) 3283 goto panic; 3284 3285 list_add(&s->list, &slab_caches); 3286 return s; 3287 3288panic: 3289 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 3290 return NULL; 3291} 3292 3293/* 3294 * Conversion table for small slabs sizes / 8 to the index in the 3295 * kmalloc array. This is necessary for slabs < 192 since we have non power 3296 * of two cache sizes there. The size of larger slabs can be determined using 3297 * fls. 
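 *
 * Worked example: kmalloc(100) gives size_index_elem(100) = (100 - 1) / 8 = 12
 * and size_index[12] = 7, i.e. the kmalloc-128 cache, while kmalloc(1000) is
 * above 192 bytes so get_slab() uses fls(1000 - 1) = 10, i.e. kmalloc-1024.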
3298 */ 3299static s8 size_index[24] = { 3300 3, /* 8 */ 3301 4, /* 16 */ 3302 5, /* 24 */ 3303 5, /* 32 */ 3304 6, /* 40 */ 3305 6, /* 48 */ 3306 6, /* 56 */ 3307 6, /* 64 */ 3308 1, /* 72 */ 3309 1, /* 80 */ 3310 1, /* 88 */ 3311 1, /* 96 */ 3312 7, /* 104 */ 3313 7, /* 112 */ 3314 7, /* 120 */ 3315 7, /* 128 */ 3316 2, /* 136 */ 3317 2, /* 144 */ 3318 2, /* 152 */ 3319 2, /* 160 */ 3320 2, /* 168 */ 3321 2, /* 176 */ 3322 2, /* 184 */ 3323 2 /* 192 */ 3324}; 3325 3326static inline int size_index_elem(size_t bytes) 3327{ 3328 return (bytes - 1) / 8; 3329} 3330 3331static struct kmem_cache *get_slab(size_t size, gfp_t flags) 3332{ 3333 int index; 3334 3335 if (size <= 192) { 3336 if (!size) 3337 return ZERO_SIZE_PTR; 3338 3339 index = size_index[size_index_elem(size)]; 3340 } else 3341 index = fls(size - 1); 3342 3343#ifdef CONFIG_ZONE_DMA 3344 if (unlikely((flags & SLUB_DMA))) 3345 return kmalloc_dma_caches[index]; 3346 3347#endif 3348 return kmalloc_caches[index]; 3349} 3350 3351void *__kmalloc(size_t size, gfp_t flags) 3352{ 3353 struct kmem_cache *s; 3354 void *ret; 3355 3356 if (unlikely(size > SLUB_MAX_SIZE)) 3357 return kmalloc_large(size, flags); 3358 3359 s = get_slab(size, flags); 3360 3361 if (unlikely(ZERO_OR_NULL_PTR(s))) 3362 return s; 3363 3364 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); 3365 3366 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 3367 3368 return ret; 3369} 3370EXPORT_SYMBOL(__kmalloc); 3371 3372#ifdef CONFIG_NUMA 3373static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 3374{ 3375 struct page *page; 3376 void *ptr = NULL; 3377 3378 flags |= __GFP_COMP | __GFP_NOTRACK; 3379 page = alloc_pages_node(node, flags, get_order(size)); 3380 if (page) 3381 ptr = page_address(page); 3382 3383 kmemleak_alloc(ptr, size, 1, flags); 3384 return ptr; 3385} 3386 3387void *__kmalloc_node(size_t size, gfp_t flags, int node) 3388{ 3389 struct kmem_cache *s; 3390 void *ret; 3391 3392 if (unlikely(size > SLUB_MAX_SIZE)) { 3393 ret = kmalloc_large_node(size, flags, node); 3394 3395 trace_kmalloc_node(_RET_IP_, ret, 3396 size, PAGE_SIZE << get_order(size), 3397 flags, node); 3398 3399 return ret; 3400 } 3401 3402 s = get_slab(size, flags); 3403 3404 if (unlikely(ZERO_OR_NULL_PTR(s))) 3405 return s; 3406 3407 ret = slab_alloc(s, flags, node, _RET_IP_); 3408 3409 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 3410 3411 return ret; 3412} 3413EXPORT_SYMBOL(__kmalloc_node); 3414#endif 3415 3416size_t ksize(const void *object) 3417{ 3418 struct page *page; 3419 3420 if (unlikely(object == ZERO_SIZE_PTR)) 3421 return 0; 3422 3423 page = virt_to_head_page(object); 3424 3425 if (unlikely(!PageSlab(page))) { 3426 WARN_ON(!PageCompound(page)); 3427 return PAGE_SIZE << compound_order(page); 3428 } 3429 3430 return slab_ksize(page->slab); 3431} 3432EXPORT_SYMBOL(ksize); 3433 3434#ifdef CONFIG_SLUB_DEBUG 3435bool verify_mem_not_deleted(const void *x) 3436{ 3437 struct page *page; 3438 void *object = (void *)x; 3439 unsigned long flags; 3440 bool rv; 3441 3442 if (unlikely(ZERO_OR_NULL_PTR(x))) 3443 return false; 3444 3445 local_irq_save(flags); 3446 3447 page = virt_to_head_page(x); 3448 if (unlikely(!PageSlab(page))) { 3449 /* maybe it was from stack? 
*/ 3450 rv = true; 3451 goto out_unlock; 3452 } 3453 3454 slab_lock(page); 3455 if (on_freelist(page->slab, page, object)) { 3456 object_err(page->slab, page, object, "Object is on free-list"); 3457 rv = false; 3458 } else { 3459 rv = true; 3460 } 3461 slab_unlock(page); 3462 3463out_unlock: 3464 local_irq_restore(flags); 3465 return rv; 3466} 3467EXPORT_SYMBOL(verify_mem_not_deleted); 3468#endif 3469 3470void kfree(const void *x) 3471{ 3472 struct page *page; 3473 void *object = (void *)x; 3474 3475 trace_kfree(_RET_IP_, x); 3476 3477 if (unlikely(ZERO_OR_NULL_PTR(x))) 3478 return; 3479 3480 page = virt_to_head_page(x); 3481 if (unlikely(!PageSlab(page))) { 3482 BUG_ON(!PageCompound(page)); 3483 kmemleak_free(x); 3484 put_page(page); 3485 return; 3486 } 3487 slab_free(page->slab, page, object, _RET_IP_); 3488} 3489EXPORT_SYMBOL(kfree); 3490 3491/* 3492 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 3493 * the remaining slabs by the number of items in use. The slabs with the 3494 * most items in use come first. New allocations will then fill those up 3495 * and thus they can be removed from the partial lists. 3496 * 3497 * The slabs with the least items are placed last. This results in them 3498 * being allocated from last increasing the chance that the last objects 3499 * are freed in them. 3500 */ 3501int kmem_cache_shrink(struct kmem_cache *s) 3502{ 3503 int node; 3504 int i; 3505 struct kmem_cache_node *n; 3506 struct page *page; 3507 struct page *t; 3508 int objects = oo_objects(s->max); 3509 struct list_head *slabs_by_inuse = 3510 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 3511 unsigned long flags; 3512 3513 if (!slabs_by_inuse) 3514 return -ENOMEM; 3515 3516 flush_all(s); 3517 for_each_node_state(node, N_NORMAL_MEMORY) { 3518 n = get_node(s, node); 3519 3520 if (!n->nr_partial) 3521 continue; 3522 3523 for (i = 0; i < objects; i++) 3524 INIT_LIST_HEAD(slabs_by_inuse + i); 3525 3526 spin_lock_irqsave(&n->list_lock, flags); 3527 3528 /* 3529 * Build lists indexed by the items in use in each slab. 3530 * 3531 * Note that concurrent frees may occur while we hold the 3532 * list_lock. page->inuse here is the upper limit. 3533 */ 3534 list_for_each_entry_safe(page, t, &n->partial, lru) { 3535 list_move(&page->lru, slabs_by_inuse + page->inuse); 3536 if (!page->inuse) 3537 n->nr_partial--; 3538 } 3539 3540 /* 3541 * Rebuild the partial list with the slabs filled up most 3542 * first and the least used slabs at the end. 3543 */ 3544 for (i = objects - 1; i > 0; i--) 3545 list_splice(slabs_by_inuse + i, n->partial.prev); 3546 3547 spin_unlock_irqrestore(&n->list_lock, flags); 3548 3549 /* Release empty slabs */ 3550 list_for_each_entry_safe(page, t, slabs_by_inuse, lru) 3551 discard_slab(s, page); 3552 } 3553 3554 kfree(slabs_by_inuse); 3555 return 0; 3556} 3557EXPORT_SYMBOL(kmem_cache_shrink); 3558 3559#if defined(CONFIG_MEMORY_HOTPLUG) 3560static int slab_mem_going_offline_callback(void *arg) 3561{ 3562 struct kmem_cache *s; 3563 3564 down_read(&slub_lock); 3565 list_for_each_entry(s, &slab_caches, list) 3566 kmem_cache_shrink(s); 3567 up_read(&slub_lock); 3568 3569 return 0; 3570} 3571 3572static void slab_mem_offline_callback(void *arg) 3573{ 3574 struct kmem_cache_node *n; 3575 struct kmem_cache *s; 3576 struct memory_notify *marg = arg; 3577 int offline_node; 3578 3579 offline_node = marg->status_change_nid; 3580 3581 /* 3582 * If the node still has available memory. we need kmem_cache_node 3583 * for it yet. 
3584 */ 3585 if (offline_node < 0) 3586 return; 3587 3588 down_read(&slub_lock); 3589 list_for_each_entry(s, &slab_caches, list) { 3590 n = get_node(s, offline_node); 3591 if (n) { 3592 /* 3593 * if n->nr_slabs > 0, slabs still exist on the node 3594 * that is going down. We were unable to free them, 3595 * and offline_pages() function shouldn't call this 3596 * callback. So, we must fail. 3597 */ 3598 BUG_ON(slabs_node(s, offline_node)); 3599 3600 s->node[offline_node] = NULL; 3601 kmem_cache_free(kmem_cache_node, n); 3602 } 3603 } 3604 up_read(&slub_lock); 3605} 3606 3607static int slab_mem_going_online_callback(void *arg) 3608{ 3609 struct kmem_cache_node *n; 3610 struct kmem_cache *s; 3611 struct memory_notify *marg = arg; 3612 int nid = marg->status_change_nid; 3613 int ret = 0; 3614 3615 /* 3616 * If the node's memory is already available, then kmem_cache_node is 3617 * already created. Nothing to do. 3618 */ 3619 if (nid < 0) 3620 return 0; 3621 3622 /* 3623 * We are bringing a node online. No memory is available yet. We must 3624 * allocate a kmem_cache_node structure in order to bring the node 3625 * online. 3626 */ 3627 down_read(&slub_lock); 3628 list_for_each_entry(s, &slab_caches, list) { 3629 /* 3630 * XXX: kmem_cache_alloc_node will fallback to other nodes 3631 * since memory is not yet available from the node that 3632 * is brought up. 3633 */ 3634 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 3635 if (!n) { 3636 ret = -ENOMEM; 3637 goto out; 3638 } 3639 init_kmem_cache_node(n, s); 3640 s->node[nid] = n; 3641 } 3642out: 3643 up_read(&slub_lock); 3644 return ret; 3645} 3646 3647static int slab_memory_callback(struct notifier_block *self, 3648 unsigned long action, void *arg) 3649{ 3650 int ret = 0; 3651 3652 switch (action) { 3653 case MEM_GOING_ONLINE: 3654 ret = slab_mem_going_online_callback(arg); 3655 break; 3656 case MEM_GOING_OFFLINE: 3657 ret = slab_mem_going_offline_callback(arg); 3658 break; 3659 case MEM_OFFLINE: 3660 case MEM_CANCEL_ONLINE: 3661 slab_mem_offline_callback(arg); 3662 break; 3663 case MEM_ONLINE: 3664 case MEM_CANCEL_OFFLINE: 3665 break; 3666 } 3667 if (ret) 3668 ret = notifier_from_errno(ret); 3669 else 3670 ret = NOTIFY_OK; 3671 return ret; 3672} 3673 3674#endif /* CONFIG_MEMORY_HOTPLUG */ 3675 3676/******************************************************************** 3677 * Basic setup of slabs 3678 *******************************************************************/ 3679 3680/* 3681 * Used for early kmem_cache structures that were allocated using 3682 * the page allocator 3683 */ 3684 3685static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) 3686{ 3687 int node; 3688 3689 list_add(&s->list, &slab_caches); 3690 s->refcount = -1; 3691 3692 for_each_node_state(node, N_NORMAL_MEMORY) { 3693 struct kmem_cache_node *n = get_node(s, node); 3694 struct page *p; 3695 3696 if (n) { 3697 list_for_each_entry(p, &n->partial, lru) 3698 p->slab = s; 3699 3700#ifdef CONFIG_SLUB_DEBUG 3701 list_for_each_entry(p, &n->full, lru) 3702 p->slab = s; 3703#endif 3704 } 3705 } 3706} 3707 3708void __init kmem_cache_init(void) 3709{ 3710 int i; 3711 int caches = 0; 3712 struct kmem_cache *temp_kmem_cache; 3713 int order; 3714 struct kmem_cache *temp_kmem_cache_node; 3715 unsigned long kmalloc_size; 3716 3717 kmem_size = offsetof(struct kmem_cache, node) + 3718 nr_node_ids * sizeof(struct kmem_cache_node *); 3719 3720 /* Allocate two kmem_caches from the page allocator */ 3721 kmalloc_size = ALIGN(kmem_size, cache_line_size()); 3722 order = get_order(2 * 
kmalloc_size); 3723 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); 3724 3725 /* 3726 * Must first have the slab cache available for the allocations of the 3727 * struct kmem_cache_node's. There is special bootstrap code in 3728 * kmem_cache_open for slab_state == DOWN. 3729 */ 3730 kmem_cache_node = (void *)kmem_cache + kmalloc_size; 3731 3732 kmem_cache_open(kmem_cache_node, "kmem_cache_node", 3733 sizeof(struct kmem_cache_node), 3734 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3735 3736 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 3737 3738 /* Able to allocate the per node structures */ 3739 slab_state = PARTIAL; 3740 3741 temp_kmem_cache = kmem_cache; 3742 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, 3743 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3744 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3745 memcpy(kmem_cache, temp_kmem_cache, kmem_size); 3746 3747 /* 3748 * Allocate kmem_cache_node properly from the kmem_cache slab. 3749 * kmem_cache_node is separately allocated so no need to 3750 * update any list pointers. 3751 */ 3752 temp_kmem_cache_node = kmem_cache_node; 3753 3754 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3755 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size); 3756 3757 kmem_cache_bootstrap_fixup(kmem_cache_node); 3758 3759 caches++; 3760 kmem_cache_bootstrap_fixup(kmem_cache); 3761 caches++; 3762 /* Free temporary boot structure */ 3763 free_pages((unsigned long)temp_kmem_cache, order); 3764 3765 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 3766 3767 /* 3768 * Patch up the size_index table if we have strange large alignment 3769 * requirements for the kmalloc array. This is only the case for 3770 * MIPS it seems. The standard arches will not generate any code here. 3771 * 3772 * Largest permitted alignment is 256 bytes due to the way we 3773 * handle the index determination for the smaller caches. 3774 * 3775 * Make sure that nothing crazy happens if someone starts tinkering 3776 * around with ARCH_KMALLOC_MINALIGN 3777 */ 3778 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3779 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3780 3781 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { 3782 int elem = size_index_elem(i); 3783 if (elem >= ARRAY_SIZE(size_index)) 3784 break; 3785 size_index[elem] = KMALLOC_SHIFT_LOW; 3786 } 3787 3788 if (KMALLOC_MIN_SIZE == 64) { 3789 /* 3790 * The 96 byte size cache is not used if the alignment 3791 * is 64 byte. 3792 */ 3793 for (i = 64 + 8; i <= 96; i += 8) 3794 size_index[size_index_elem(i)] = 7; 3795 } else if (KMALLOC_MIN_SIZE == 128) { 3796 /* 3797 * The 192 byte sized cache is not used if the alignment 3798 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3799 * instead. 
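 *
 * (E.g. kmalloc(160) on such a configuration then resolves to index 8,
 * the 256 byte cache, instead of the unused 192 byte slot.)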
3800 */ 3801 for (i = 128 + 8; i <= 192; i += 8) 3802 size_index[size_index_elem(i)] = 8; 3803 } 3804 3805 /* Caches that are not of the two-to-the-power-of size */ 3806 if (KMALLOC_MIN_SIZE <= 32) { 3807 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); 3808 caches++; 3809 } 3810 3811 if (KMALLOC_MIN_SIZE <= 64) { 3812 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); 3813 caches++; 3814 } 3815 3816 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3817 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); 3818 caches++; 3819 } 3820 3821 slab_state = UP; 3822 3823 /* Provide the correct kmalloc names now that the caches are up */ 3824 if (KMALLOC_MIN_SIZE <= 32) { 3825 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT); 3826 BUG_ON(!kmalloc_caches[1]->name); 3827 } 3828 3829 if (KMALLOC_MIN_SIZE <= 64) { 3830 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT); 3831 BUG_ON(!kmalloc_caches[2]->name); 3832 } 3833 3834 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3835 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); 3836 3837 BUG_ON(!s); 3838 kmalloc_caches[i]->name = s; 3839 } 3840 3841#ifdef CONFIG_SMP 3842 register_cpu_notifier(&slab_notifier); 3843#endif 3844 3845#ifdef CONFIG_ZONE_DMA 3846 for (i = 0; i < SLUB_PAGE_SHIFT; i++) { 3847 struct kmem_cache *s = kmalloc_caches[i]; 3848 3849 if (s && s->size) { 3850 char *name = kasprintf(GFP_NOWAIT, 3851 "dma-kmalloc-%d", s->objsize); 3852 3853 BUG_ON(!name); 3854 kmalloc_dma_caches[i] = create_kmalloc_cache(name, 3855 s->objsize, SLAB_CACHE_DMA); 3856 } 3857 } 3858#endif 3859 printk(KERN_INFO 3860 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3861 " CPUs=%d, Nodes=%d\n", 3862 caches, cache_line_size(), 3863 slub_min_order, slub_max_order, slub_min_objects, 3864 nr_cpu_ids, nr_node_ids); 3865} 3866 3867void __init kmem_cache_init_late(void) 3868{ 3869} 3870 3871/* 3872 * Find a mergeable slab cache 3873 */ 3874static int slab_unmergeable(struct kmem_cache *s) 3875{ 3876 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3877 return 1; 3878 3879 if (s->ctor) 3880 return 1; 3881 3882 /* 3883 * We may have set a slab to be unmergeable during bootstrap. 3884 */ 3885 if (s->refcount < 0) 3886 return 1; 3887 3888 return 0; 3889} 3890 3891static struct kmem_cache *find_mergeable(size_t size, 3892 size_t align, unsigned long flags, const char *name, 3893 void (*ctor)(void *)) 3894{ 3895 struct kmem_cache *s; 3896 3897 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3898 return NULL; 3899 3900 if (ctor) 3901 return NULL; 3902 3903 size = ALIGN(size, sizeof(void *)); 3904 align = calculate_alignment(flags, align, size); 3905 size = ALIGN(size, align); 3906 flags = kmem_cache_flags(size, flags, name, NULL); 3907 3908 list_for_each_entry(s, &slab_caches, list) { 3909 if (slab_unmergeable(s)) 3910 continue; 3911 3912 if (size > s->size) 3913 continue; 3914 3915 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3916 continue; 3917 /* 3918 * Check if alignment is compatible. 
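 * (i.e. the existing cache's size must already be a multiple of the
 * requested alignment.)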
3919 * Courtesy of Adrian Drzewiecki 3920 */ 3921 if ((s->size & ~(align - 1)) != s->size) 3922 continue; 3923 3924 if (s->size - size >= sizeof(void *)) 3925 continue; 3926 3927 return s; 3928 } 3929 return NULL; 3930} 3931 3932struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3933 size_t align, unsigned long flags, void (*ctor)(void *)) 3934{ 3935 struct kmem_cache *s; 3936 char *n; 3937 3938 if (WARN_ON(!name)) 3939 return NULL; 3940 3941 down_write(&slub_lock); 3942 s = find_mergeable(size, align, flags, name, ctor); 3943 if (s) { 3944 s->refcount++; 3945 /* 3946 * Adjust the object sizes so that we clear 3947 * the complete object on kzalloc. 3948 */ 3949 s->objsize = max(s->objsize, (int)size); 3950 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3951 3952 if (sysfs_slab_alias(s, name)) { 3953 s->refcount--; 3954 goto err; 3955 } 3956 up_write(&slub_lock); 3957 return s; 3958 } 3959 3960 n = kstrdup(name, GFP_KERNEL); 3961 if (!n) 3962 goto err; 3963 3964 s = kmalloc(kmem_size, GFP_KERNEL); 3965 if (s) { 3966 if (kmem_cache_open(s, n, 3967 size, align, flags, ctor)) { 3968 list_add(&s->list, &slab_caches); 3969 if (sysfs_slab_add(s)) { 3970 list_del(&s->list); 3971 kfree(n); 3972 kfree(s); 3973 goto err; 3974 } 3975 up_write(&slub_lock); 3976 return s; 3977 } 3978 kfree(n); 3979 kfree(s); 3980 } 3981err: 3982 up_write(&slub_lock); 3983 3984 if (flags & SLAB_PANIC) 3985 panic("Cannot create slabcache %s\n", name); 3986 else 3987 s = NULL; 3988 return s; 3989} 3990EXPORT_SYMBOL(kmem_cache_create); 3991 3992#ifdef CONFIG_SMP 3993/* 3994 * Use the cpu notifier to insure that the cpu slabs are flushed when 3995 * necessary. 3996 */ 3997static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3998 unsigned long action, void *hcpu) 3999{ 4000 long cpu = (long)hcpu; 4001 struct kmem_cache *s; 4002 unsigned long flags; 4003 4004 switch (action) { 4005 case CPU_UP_CANCELED: 4006 case CPU_UP_CANCELED_FROZEN: 4007 case CPU_DEAD: 4008 case CPU_DEAD_FROZEN: 4009 down_read(&slub_lock); 4010 list_for_each_entry(s, &slab_caches, list) { 4011 local_irq_save(flags); 4012 __flush_cpu_slab(s, cpu); 4013 local_irq_restore(flags); 4014 } 4015 up_read(&slub_lock); 4016 break; 4017 default: 4018 break; 4019 } 4020 return NOTIFY_OK; 4021} 4022 4023static struct notifier_block __cpuinitdata slab_notifier = { 4024 .notifier_call = slab_cpuup_callback 4025}; 4026 4027#endif 4028 4029void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4030{ 4031 struct kmem_cache *s; 4032 void *ret; 4033 4034 if (unlikely(size > SLUB_MAX_SIZE)) 4035 return kmalloc_large(size, gfpflags); 4036 4037 s = get_slab(size, gfpflags); 4038 4039 if (unlikely(ZERO_OR_NULL_PTR(s))) 4040 return s; 4041 4042 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); 4043 4044 /* Honor the call site pointer we received. 
*/ 4045 trace_kmalloc(caller, ret, size, s->size, gfpflags); 4046 4047 return ret; 4048} 4049 4050#ifdef CONFIG_NUMA 4051void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 4052 int node, unsigned long caller) 4053{ 4054 struct kmem_cache *s; 4055 void *ret; 4056 4057 if (unlikely(size > SLUB_MAX_SIZE)) { 4058 ret = kmalloc_large_node(size, gfpflags, node); 4059 4060 trace_kmalloc_node(caller, ret, 4061 size, PAGE_SIZE << get_order(size), 4062 gfpflags, node); 4063 4064 return ret; 4065 } 4066 4067 s = get_slab(size, gfpflags); 4068 4069 if (unlikely(ZERO_OR_NULL_PTR(s))) 4070 return s; 4071 4072 ret = slab_alloc(s, gfpflags, node, caller); 4073 4074 /* Honor the call site pointer we received. */ 4075 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 4076 4077 return ret; 4078} 4079#endif 4080 4081#ifdef CONFIG_SYSFS 4082static int count_inuse(struct page *page) 4083{ 4084 return page->inuse; 4085} 4086 4087static int count_total(struct page *page) 4088{ 4089 return page->objects; 4090} 4091#endif 4092 4093#ifdef CONFIG_SLUB_DEBUG 4094static int validate_slab(struct kmem_cache *s, struct page *page, 4095 unsigned long *map) 4096{ 4097 void *p; 4098 void *addr = page_address(page); 4099 4100 if (!check_slab(s, page) || 4101 !on_freelist(s, page, NULL)) 4102 return 0; 4103 4104 /* Now we know that a valid freelist exists */ 4105 bitmap_zero(map, page->objects); 4106 4107 get_map(s, page, map); 4108 for_each_object(p, s, addr, page->objects) { 4109 if (test_bit(slab_index(p, s, addr), map)) 4110 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) 4111 return 0; 4112 } 4113 4114 for_each_object(p, s, addr, page->objects) 4115 if (!test_bit(slab_index(p, s, addr), map)) 4116 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) 4117 return 0; 4118 return 1; 4119} 4120 4121static void validate_slab_slab(struct kmem_cache *s, struct page *page, 4122 unsigned long *map) 4123{ 4124 slab_lock(page); 4125 validate_slab(s, page, map); 4126 slab_unlock(page); 4127} 4128 4129static int validate_slab_node(struct kmem_cache *s, 4130 struct kmem_cache_node *n, unsigned long *map) 4131{ 4132 unsigned long count = 0; 4133 struct page *page; 4134 unsigned long flags; 4135 4136 spin_lock_irqsave(&n->list_lock, flags); 4137 4138 list_for_each_entry(page, &n->partial, lru) { 4139 validate_slab_slab(s, page, map); 4140 count++; 4141 } 4142 if (count != n->nr_partial) 4143 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 4144 "counter=%ld\n", s->name, count, n->nr_partial); 4145 4146 if (!(s->flags & SLAB_STORE_USER)) 4147 goto out; 4148 4149 list_for_each_entry(page, &n->full, lru) { 4150 validate_slab_slab(s, page, map); 4151 count++; 4152 } 4153 if (count != atomic_long_read(&n->nr_slabs)) 4154 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 4155 "counter=%ld\n", s->name, count, 4156 atomic_long_read(&n->nr_slabs)); 4157 4158out: 4159 spin_unlock_irqrestore(&n->list_lock, flags); 4160 return count; 4161} 4162 4163static long validate_slab_cache(struct kmem_cache *s) 4164{ 4165 int node; 4166 unsigned long count = 0; 4167 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 4168 sizeof(unsigned long), GFP_KERNEL); 4169 4170 if (!map) 4171 return -ENOMEM; 4172 4173 flush_all(s); 4174 for_each_node_state(node, N_NORMAL_MEMORY) { 4175 struct kmem_cache_node *n = get_node(s, node); 4176 4177 count += validate_slab_node(s, n, map); 4178 } 4179 kfree(map); 4180 return count; 4181} 4182/* 4183 * Generate lists of code addresses where slabcache objects are allocated 4184 * and 
freed. 4185 */ 4186 4187struct location { 4188 unsigned long count; 4189 unsigned long addr; 4190 long long sum_time; 4191 long min_time; 4192 long max_time; 4193 long min_pid; 4194 long max_pid; 4195 DECLARE_BITMAP(cpus, NR_CPUS); 4196 nodemask_t nodes; 4197}; 4198 4199struct loc_track { 4200 unsigned long max; 4201 unsigned long count; 4202 struct location *loc; 4203}; 4204 4205static void free_loc_track(struct loc_track *t) 4206{ 4207 if (t->max) 4208 free_pages((unsigned long)t->loc, 4209 get_order(sizeof(struct location) * t->max)); 4210} 4211 4212static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 4213{ 4214 struct location *l; 4215 int order; 4216 4217 order = get_order(sizeof(struct location) * max); 4218 4219 l = (void *)__get_free_pages(flags, order); 4220 if (!l) 4221 return 0; 4222 4223 if (t->count) { 4224 memcpy(l, t->loc, sizeof(struct location) * t->count); 4225 free_loc_track(t); 4226 } 4227 t->max = max; 4228 t->loc = l; 4229 return 1; 4230} 4231 4232static int add_location(struct loc_track *t, struct kmem_cache *s, 4233 const struct track *track) 4234{ 4235 long start, end, pos; 4236 struct location *l; 4237 unsigned long caddr; 4238 unsigned long age = jiffies - track->when; 4239 4240 start = -1; 4241 end = t->count; 4242 4243 for ( ; ; ) { 4244 pos = start + (end - start + 1) / 2; 4245 4246 /* 4247 * There is nothing at "end". If we end up there 4248 * we need to add something to before end. 4249 */ 4250 if (pos == end) 4251 break; 4252 4253 caddr = t->loc[pos].addr; 4254 if (track->addr == caddr) { 4255 4256 l = &t->loc[pos]; 4257 l->count++; 4258 if (track->when) { 4259 l->sum_time += age; 4260 if (age < l->min_time) 4261 l->min_time = age; 4262 if (age > l->max_time) 4263 l->max_time = age; 4264 4265 if (track->pid < l->min_pid) 4266 l->min_pid = track->pid; 4267 if (track->pid > l->max_pid) 4268 l->max_pid = track->pid; 4269 4270 cpumask_set_cpu(track->cpu, 4271 to_cpumask(l->cpus)); 4272 } 4273 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4274 return 1; 4275 } 4276 4277 if (track->addr < caddr) 4278 end = pos; 4279 else 4280 start = pos; 4281 } 4282 4283 /* 4284 * Not found. Insert new tracking element. 
4285 */ 4286 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 4287 return 0; 4288 4289 l = t->loc + pos; 4290 if (pos < t->count) 4291 memmove(l + 1, l, 4292 (t->count - pos) * sizeof(struct location)); 4293 t->count++; 4294 l->count = 1; 4295 l->addr = track->addr; 4296 l->sum_time = age; 4297 l->min_time = age; 4298 l->max_time = age; 4299 l->min_pid = track->pid; 4300 l->max_pid = track->pid; 4301 cpumask_clear(to_cpumask(l->cpus)); 4302 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 4303 nodes_clear(l->nodes); 4304 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4305 return 1; 4306} 4307 4308static void process_slab(struct loc_track *t, struct kmem_cache *s, 4309 struct page *page, enum track_item alloc, 4310 unsigned long *map) 4311{ 4312 void *addr = page_address(page); 4313 void *p; 4314 4315 bitmap_zero(map, page->objects); 4316 get_map(s, page, map); 4317 4318 for_each_object(p, s, addr, page->objects) 4319 if (!test_bit(slab_index(p, s, addr), map)) 4320 add_location(t, s, get_track(s, p, alloc)); 4321} 4322 4323static int list_locations(struct kmem_cache *s, char *buf, 4324 enum track_item alloc) 4325{ 4326 int len = 0; 4327 unsigned long i; 4328 struct loc_track t = { 0, 0, NULL }; 4329 int node; 4330 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 4331 sizeof(unsigned long), GFP_KERNEL); 4332 4333 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 4334 GFP_TEMPORARY)) { 4335 kfree(map); 4336 return sprintf(buf, "Out of memory\n"); 4337 } 4338 /* Push back cpu slabs */ 4339 flush_all(s); 4340 4341 for_each_node_state(node, N_NORMAL_MEMORY) { 4342 struct kmem_cache_node *n = get_node(s, node); 4343 unsigned long flags; 4344 struct page *page; 4345 4346 if (!atomic_long_read(&n->nr_slabs)) 4347 continue; 4348 4349 spin_lock_irqsave(&n->list_lock, flags); 4350 list_for_each_entry(page, &n->partial, lru) 4351 process_slab(&t, s, page, alloc, map); 4352 list_for_each_entry(page, &n->full, lru) 4353 process_slab(&t, s, page, alloc, map); 4354 spin_unlock_irqrestore(&n->list_lock, flags); 4355 } 4356 4357 for (i = 0; i < t.count; i++) { 4358 struct location *l = &t.loc[i]; 4359 4360 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 4361 break; 4362 len += sprintf(buf + len, "%7ld ", l->count); 4363 4364 if (l->addr) 4365 len += sprintf(buf + len, "%pS", (void *)l->addr); 4366 else 4367 len += sprintf(buf + len, "<not-available>"); 4368 4369 if (l->sum_time != l->min_time) { 4370 len += sprintf(buf + len, " age=%ld/%ld/%ld", 4371 l->min_time, 4372 (long)div_u64(l->sum_time, l->count), 4373 l->max_time); 4374 } else 4375 len += sprintf(buf + len, " age=%ld", 4376 l->min_time); 4377 4378 if (l->min_pid != l->max_pid) 4379 len += sprintf(buf + len, " pid=%ld-%ld", 4380 l->min_pid, l->max_pid); 4381 else 4382 len += sprintf(buf + len, " pid=%ld", 4383 l->min_pid); 4384 4385 if (num_online_cpus() > 1 && 4386 !cpumask_empty(to_cpumask(l->cpus)) && 4387 len < PAGE_SIZE - 60) { 4388 len += sprintf(buf + len, " cpus="); 4389 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 4390 to_cpumask(l->cpus)); 4391 } 4392 4393 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && 4394 len < PAGE_SIZE - 60) { 4395 len += sprintf(buf + len, " nodes="); 4396 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 4397 l->nodes); 4398 } 4399 4400 len += sprintf(buf + len, "\n"); 4401 } 4402 4403 free_loc_track(&t); 4404 kfree(map); 4405 if (!t.count) 4406 len += sprintf(buf, "No data\n"); 4407 return len; 4408} 4409#endif 4410 4411#ifdef 
SLUB_RESILIENCY_TEST 4412static void resiliency_test(void) 4413{ 4414 u8 *p; 4415 4416 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10); 4417 4418 printk(KERN_ERR "SLUB resiliency testing\n"); 4419 printk(KERN_ERR "-----------------------\n"); 4420 printk(KERN_ERR "A. Corruption after allocation\n"); 4421 4422 p = kzalloc(16, GFP_KERNEL); 4423 p[16] = 0x12; 4424 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 4425 " 0x12->0x%p\n\n", p + 16); 4426 4427 validate_slab_cache(kmalloc_caches[4]); 4428 4429 /* Hmmm... The next two are dangerous */ 4430 p = kzalloc(32, GFP_KERNEL); 4431 p[32 + sizeof(void *)] = 0x34; 4432 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 4433 " 0x34 -> -0x%p\n", p); 4434 printk(KERN_ERR 4435 "If allocated object is overwritten then not detectable\n\n"); 4436 4437 validate_slab_cache(kmalloc_caches[5]); 4438 p = kzalloc(64, GFP_KERNEL); 4439 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 4440 *p = 0x56; 4441 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 4442 p); 4443 printk(KERN_ERR 4444 "If allocated object is overwritten then not detectable\n\n"); 4445 validate_slab_cache(kmalloc_caches[6]); 4446 4447 printk(KERN_ERR "\nB. Corruption after free\n"); 4448 p = kzalloc(128, GFP_KERNEL); 4449 kfree(p); 4450 *p = 0x78; 4451 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 4452 validate_slab_cache(kmalloc_caches[7]); 4453 4454 p = kzalloc(256, GFP_KERNEL); 4455 kfree(p); 4456 p[50] = 0x9a; 4457 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 4458 p); 4459 validate_slab_cache(kmalloc_caches[8]); 4460 4461 p = kzalloc(512, GFP_KERNEL); 4462 kfree(p); 4463 p[512] = 0xab; 4464 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 4465 validate_slab_cache(kmalloc_caches[9]); 4466} 4467#else 4468#ifdef CONFIG_SYSFS 4469static void resiliency_test(void) {}; 4470#endif 4471#endif 4472 4473#ifdef CONFIG_SYSFS 4474enum slab_stat_type { 4475 SL_ALL, /* All slabs */ 4476 SL_PARTIAL, /* Only partially allocated slabs */ 4477 SL_CPU, /* Only slabs used for cpu caches */ 4478 SL_OBJECTS, /* Determine allocated objects not slabs */ 4479 SL_TOTAL /* Determine object capacity not slabs */ 4480}; 4481 4482#define SO_ALL (1 << SL_ALL) 4483#define SO_PARTIAL (1 << SL_PARTIAL) 4484#define SO_CPU (1 << SL_CPU) 4485#define SO_OBJECTS (1 << SL_OBJECTS) 4486#define SO_TOTAL (1 << SL_TOTAL) 4487 4488static ssize_t show_slab_objects(struct kmem_cache *s, 4489 char *buf, unsigned long flags) 4490{ 4491 unsigned long total = 0; 4492 int node; 4493 int x; 4494 unsigned long *nodes; 4495 unsigned long *per_cpu; 4496 4497 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 4498 if (!nodes) 4499 return -ENOMEM; 4500 per_cpu = nodes + nr_node_ids; 4501 4502 if (flags & SO_CPU) { 4503 int cpu; 4504 4505 for_each_possible_cpu(cpu) { 4506 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 4507 struct page *page; 4508 4509 if (!c || c->node < 0) 4510 continue; 4511 4512 if (c->page) { 4513 if (flags & SO_TOTAL) 4514 x = c->page->objects; 4515 else if (flags & SO_OBJECTS) 4516 x = c->page->inuse; 4517 else 4518 x = 1; 4519 4520 total += x; 4521 nodes[c->node] += x; 4522 } 4523 page = c->partial; 4524 4525 if (page) { 4526 x = page->pobjects; 4527 total += x; 4528 nodes[c->node] += x; 4529 } 4530 per_cpu[c->node]++; 4531 } 4532 } 4533 4534 lock_memory_hotplug(); 4535#ifdef CONFIG_SLUB_DEBUG 4536 if (flags & SO_ALL) { 4537 for_each_node_state(node, 
N_NORMAL_MEMORY) { 4538 struct kmem_cache_node *n = get_node(s, node); 4539 4540 if (flags & SO_TOTAL) 4541 x = atomic_long_read(&n->total_objects); 4542 else if (flags & SO_OBJECTS) 4543 x = atomic_long_read(&n->total_objects) - 4544 count_partial(n, count_free); 4545 4546 else 4547 x = atomic_long_read(&n->nr_slabs); 4548 total += x; 4549 nodes[node] += x; 4550 } 4551 4552 } else 4553#endif 4554 if (flags & SO_PARTIAL) { 4555 for_each_node_state(node, N_NORMAL_MEMORY) { 4556 struct kmem_cache_node *n = get_node(s, node); 4557 4558 if (flags & SO_TOTAL) 4559 x = count_partial(n, count_total); 4560 else if (flags & SO_OBJECTS) 4561 x = count_partial(n, count_inuse); 4562 else 4563 x = n->nr_partial; 4564 total += x; 4565 nodes[node] += x; 4566 } 4567 } 4568 x = sprintf(buf, "%lu", total); 4569#ifdef CONFIG_NUMA 4570 for_each_node_state(node, N_NORMAL_MEMORY) 4571 if (nodes[node]) 4572 x += sprintf(buf + x, " N%d=%lu", 4573 node, nodes[node]); 4574#endif 4575 unlock_memory_hotplug(); 4576 kfree(nodes); 4577 return x + sprintf(buf + x, "\n"); 4578} 4579 4580#ifdef CONFIG_SLUB_DEBUG 4581static int any_slab_objects(struct kmem_cache *s) 4582{ 4583 int node; 4584 4585 for_each_online_node(node) { 4586 struct kmem_cache_node *n = get_node(s, node); 4587 4588 if (!n) 4589 continue; 4590 4591 if (atomic_long_read(&n->total_objects)) 4592 return 1; 4593 } 4594 return 0; 4595} 4596#endif 4597 4598#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 4599#define to_slab(n) container_of(n, struct kmem_cache, kobj) 4600 4601struct slab_attribute { 4602 struct attribute attr; 4603 ssize_t (*show)(struct kmem_cache *s, char *buf); 4604 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 4605}; 4606 4607#define SLAB_ATTR_RO(_name) \ 4608 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 4609 4610#define SLAB_ATTR(_name) \ 4611 static struct slab_attribute _name##_attr = \ 4612 __ATTR(_name, 0644, _name##_show, _name##_store) 4613 4614static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 4615{ 4616 return sprintf(buf, "%d\n", s->size); 4617} 4618SLAB_ATTR_RO(slab_size); 4619 4620static ssize_t align_show(struct kmem_cache *s, char *buf) 4621{ 4622 return sprintf(buf, "%d\n", s->align); 4623} 4624SLAB_ATTR_RO(align); 4625 4626static ssize_t object_size_show(struct kmem_cache *s, char *buf) 4627{ 4628 return sprintf(buf, "%d\n", s->objsize); 4629} 4630SLAB_ATTR_RO(object_size); 4631 4632static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 4633{ 4634 return sprintf(buf, "%d\n", oo_objects(s->oo)); 4635} 4636SLAB_ATTR_RO(objs_per_slab); 4637 4638static ssize_t order_store(struct kmem_cache *s, 4639 const char *buf, size_t length) 4640{ 4641 unsigned long order; 4642 int err; 4643 4644 err = strict_strtoul(buf, 10, &order); 4645 if (err) 4646 return err; 4647 4648 if (order > slub_max_order || order < slub_min_order) 4649 return -EINVAL; 4650 4651 calculate_sizes(s, order); 4652 return length; 4653} 4654 4655static ssize_t order_show(struct kmem_cache *s, char *buf) 4656{ 4657 return sprintf(buf, "%d\n", oo_order(s->oo)); 4658} 4659SLAB_ATTR(order); 4660 4661static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 4662{ 4663 return sprintf(buf, "%lu\n", s->min_partial); 4664} 4665 4666static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 4667 size_t length) 4668{ 4669 unsigned long min; 4670 int err; 4671 4672 err = strict_strtoul(buf, 10, &min); 4673 if (err) 4674 return err; 4675 4676 set_min_partial(s, min); 4677 
return length; 4678} 4679SLAB_ATTR(min_partial); 4680 4681static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 4682{ 4683 return sprintf(buf, "%u\n", s->cpu_partial); 4684} 4685 4686static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 4687 size_t length) 4688{ 4689 unsigned long objects; 4690 int err; 4691 4692 err = strict_strtoul(buf, 10, &objects); 4693 if (err) 4694 return err; 4695 4696 s->cpu_partial = objects; 4697 flush_all(s); 4698 return length; 4699} 4700SLAB_ATTR(cpu_partial); 4701 4702static ssize_t ctor_show(struct kmem_cache *s, char *buf) 4703{ 4704 if (!s->ctor) 4705 return 0; 4706 return sprintf(buf, "%pS\n", s->ctor); 4707} 4708SLAB_ATTR_RO(ctor); 4709 4710static ssize_t aliases_show(struct kmem_cache *s, char *buf) 4711{ 4712 return sprintf(buf, "%d\n", s->refcount - 1); 4713} 4714SLAB_ATTR_RO(aliases); 4715 4716static ssize_t partial_show(struct kmem_cache *s, char *buf) 4717{ 4718 return show_slab_objects(s, buf, SO_PARTIAL); 4719} 4720SLAB_ATTR_RO(partial); 4721 4722static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 4723{ 4724 return show_slab_objects(s, buf, SO_CPU); 4725} 4726SLAB_ATTR_RO(cpu_slabs); 4727 4728static ssize_t objects_show(struct kmem_cache *s, char *buf) 4729{ 4730 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 4731} 4732SLAB_ATTR_RO(objects); 4733 4734static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 4735{ 4736 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 4737} 4738SLAB_ATTR_RO(objects_partial); 4739 4740static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 4741{ 4742 int objects = 0; 4743 int pages = 0; 4744 int cpu; 4745 int len; 4746 4747 for_each_online_cpu(cpu) { 4748 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; 4749 4750 if (page) { 4751 pages += page->pages; 4752 objects += page->pobjects; 4753 } 4754 } 4755 4756 len = sprintf(buf, "%d(%d)", objects, pages); 4757 4758#ifdef CONFIG_SMP 4759 for_each_online_cpu(cpu) { 4760 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial; 4761 4762 if (page && len < PAGE_SIZE - 20) 4763 len += sprintf(buf + len, " C%d=%d(%d)", cpu, 4764 page->pobjects, page->pages); 4765 } 4766#endif 4767 return len + sprintf(buf + len, "\n"); 4768} 4769SLAB_ATTR_RO(slabs_cpu_partial); 4770 4771static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 4772{ 4773 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 4774} 4775 4776static ssize_t reclaim_account_store(struct kmem_cache *s, 4777 const char *buf, size_t length) 4778{ 4779 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 4780 if (buf[0] == '1') 4781 s->flags |= SLAB_RECLAIM_ACCOUNT; 4782 return length; 4783} 4784SLAB_ATTR(reclaim_account); 4785 4786static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 4787{ 4788 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 4789} 4790SLAB_ATTR_RO(hwcache_align); 4791 4792#ifdef CONFIG_ZONE_DMA 4793static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 4794{ 4795 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 4796} 4797SLAB_ATTR_RO(cache_dma); 4798#endif 4799 4800static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 4801{ 4802 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 4803} 4804SLAB_ATTR_RO(destroy_by_rcu); 4805 4806static ssize_t reserved_show(struct kmem_cache *s, char *buf) 4807{ 4808 return sprintf(buf, "%d\n", s->reserved); 4809} 4810SLAB_ATTR_RO(reserved); 4811 4812#ifdef CONFIG_SLUB_DEBUG 4813static ssize_t 
slabs_show(struct kmem_cache *s, char *buf) 4814{ 4815 return show_slab_objects(s, buf, SO_ALL); 4816} 4817SLAB_ATTR_RO(slabs); 4818 4819static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 4820{ 4821 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 4822} 4823SLAB_ATTR_RO(total_objects); 4824 4825static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 4826{ 4827 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 4828} 4829 4830static ssize_t sanity_checks_store(struct kmem_cache *s, 4831 const char *buf, size_t length) 4832{ 4833 s->flags &= ~SLAB_DEBUG_FREE; 4834 if (buf[0] == '1') { 4835 s->flags &= ~__CMPXCHG_DOUBLE; 4836 s->flags |= SLAB_DEBUG_FREE; 4837 } 4838 return length; 4839} 4840SLAB_ATTR(sanity_checks); 4841 4842static ssize_t trace_show(struct kmem_cache *s, char *buf) 4843{ 4844 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 4845} 4846 4847static ssize_t trace_store(struct kmem_cache *s, const char *buf, 4848 size_t length) 4849{ 4850 s->flags &= ~SLAB_TRACE; 4851 if (buf[0] == '1') { 4852 s->flags &= ~__CMPXCHG_DOUBLE; 4853 s->flags |= SLAB_TRACE; 4854 } 4855 return length; 4856} 4857SLAB_ATTR(trace); 4858 4859static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 4860{ 4861 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 4862} 4863 4864static ssize_t red_zone_store(struct kmem_cache *s, 4865 const char *buf, size_t length) 4866{ 4867 if (any_slab_objects(s)) 4868 return -EBUSY; 4869 4870 s->flags &= ~SLAB_RED_ZONE; 4871 if (buf[0] == '1') { 4872 s->flags &= ~__CMPXCHG_DOUBLE; 4873 s->flags |= SLAB_RED_ZONE; 4874 } 4875 calculate_sizes(s, -1); 4876 return length; 4877} 4878SLAB_ATTR(red_zone); 4879 4880static ssize_t poison_show(struct kmem_cache *s, char *buf) 4881{ 4882 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 4883} 4884 4885static ssize_t poison_store(struct kmem_cache *s, 4886 const char *buf, size_t length) 4887{ 4888 if (any_slab_objects(s)) 4889 return -EBUSY; 4890 4891 s->flags &= ~SLAB_POISON; 4892 if (buf[0] == '1') { 4893 s->flags &= ~__CMPXCHG_DOUBLE; 4894 s->flags |= SLAB_POISON; 4895 } 4896 calculate_sizes(s, -1); 4897 return length; 4898} 4899SLAB_ATTR(poison); 4900 4901static ssize_t store_user_show(struct kmem_cache *s, char *buf) 4902{ 4903 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 4904} 4905 4906static ssize_t store_user_store(struct kmem_cache *s, 4907 const char *buf, size_t length) 4908{ 4909 if (any_slab_objects(s)) 4910 return -EBUSY; 4911 4912 s->flags &= ~SLAB_STORE_USER; 4913 if (buf[0] == '1') { 4914 s->flags &= ~__CMPXCHG_DOUBLE; 4915 s->flags |= SLAB_STORE_USER; 4916 } 4917 calculate_sizes(s, -1); 4918 return length; 4919} 4920SLAB_ATTR(store_user); 4921 4922static ssize_t validate_show(struct kmem_cache *s, char *buf) 4923{ 4924 return 0; 4925} 4926 4927static ssize_t validate_store(struct kmem_cache *s, 4928 const char *buf, size_t length) 4929{ 4930 int ret = -EINVAL; 4931 4932 if (buf[0] == '1') { 4933 ret = validate_slab_cache(s); 4934 if (ret >= 0) 4935 ret = length; 4936 } 4937 return ret; 4938} 4939SLAB_ATTR(validate); 4940 4941static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4942{ 4943 if (!(s->flags & SLAB_STORE_USER)) 4944 return -ENOSYS; 4945 return list_locations(s, buf, TRACK_ALLOC); 4946} 4947SLAB_ATTR_RO(alloc_calls); 4948 4949static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4950{ 4951 if (!(s->flags & SLAB_STORE_USER)) 4952 return -ENOSYS; 4953 return list_locations(s, buf, TRACK_FREE); 4954} 
4955SLAB_ATTR_RO(free_calls); 4956#endif /* CONFIG_SLUB_DEBUG */ 4957 4958#ifdef CONFIG_FAILSLAB 4959static ssize_t failslab_show(struct kmem_cache *s, char *buf) 4960{ 4961 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 4962} 4963 4964static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 4965 size_t length) 4966{ 4967 s->flags &= ~SLAB_FAILSLAB; 4968 if (buf[0] == '1') 4969 s->flags |= SLAB_FAILSLAB; 4970 return length; 4971} 4972SLAB_ATTR(failslab); 4973#endif 4974 4975static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4976{ 4977 return 0; 4978} 4979 4980static ssize_t shrink_store(struct kmem_cache *s, 4981 const char *buf, size_t length) 4982{ 4983 if (buf[0] == '1') { 4984 int rc = kmem_cache_shrink(s); 4985 4986 if (rc) 4987 return rc; 4988 } else 4989 return -EINVAL; 4990 return length; 4991} 4992SLAB_ATTR(shrink); 4993 4994#ifdef CONFIG_NUMA 4995static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4996{ 4997 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4998} 4999 5000static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5001 const char *buf, size_t length) 5002{ 5003 unsigned long ratio; 5004 int err; 5005 5006 err = strict_strtoul(buf, 10, &ratio); 5007 if (err) 5008 return err; 5009 5010 if (ratio <= 100) 5011 s->remote_node_defrag_ratio = ratio * 10; 5012 5013 return length; 5014} 5015SLAB_ATTR(remote_node_defrag_ratio); 5016#endif 5017 5018#ifdef CONFIG_SLUB_STATS 5019static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5020{ 5021 unsigned long sum = 0; 5022 int cpu; 5023 int len; 5024 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 5025 5026 if (!data) 5027 return -ENOMEM; 5028 5029 for_each_online_cpu(cpu) { 5030 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5031 5032 data[cpu] = x; 5033 sum += x; 5034 } 5035 5036 len = sprintf(buf, "%lu", sum); 5037 5038#ifdef CONFIG_SMP 5039 for_each_online_cpu(cpu) { 5040 if (data[cpu] && len < PAGE_SIZE - 20) 5041 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 5042 } 5043#endif 5044 kfree(data); 5045 return len + sprintf(buf + len, "\n"); 5046} 5047 5048static void clear_stat(struct kmem_cache *s, enum stat_item si) 5049{ 5050 int cpu; 5051 5052 for_each_online_cpu(cpu) 5053 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5054} 5055 5056#define STAT_ATTR(si, text) \ 5057static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5058{ \ 5059 return show_stat(s, buf, si); \ 5060} \ 5061static ssize_t text##_store(struct kmem_cache *s, \ 5062 const char *buf, size_t length) \ 5063{ \ 5064 if (buf[0] != '0') \ 5065 return -EINVAL; \ 5066 clear_stat(s, si); \ 5067 return length; \ 5068} \ 5069SLAB_ATTR(text); \ 5070 5071STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5072STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5073STAT_ATTR(FREE_FASTPATH, free_fastpath); 5074STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5075STAT_ATTR(FREE_FROZEN, free_frozen); 5076STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5077STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5078STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5079STAT_ATTR(ALLOC_SLAB, alloc_slab); 5080STAT_ATTR(ALLOC_REFILL, alloc_refill); 5081STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5082STAT_ATTR(FREE_SLAB, free_slab); 5083STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5084STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5085STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5086STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5087STAT_ATTR(DEACTIVATE_TO_TAIL, 
deactivate_to_tail); 5088STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5089STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5090STAT_ATTR(ORDER_FALLBACK, order_fallback); 5091STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5092STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5093STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5094STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5095#endif 5096 5097static struct attribute *slab_attrs[] = { 5098 &slab_size_attr.attr, 5099 &object_size_attr.attr, 5100 &objs_per_slab_attr.attr, 5101 &order_attr.attr, 5102 &min_partial_attr.attr, 5103 &cpu_partial_attr.attr, 5104 &objects_attr.attr, 5105 &objects_partial_attr.attr, 5106 &partial_attr.attr, 5107 &cpu_slabs_attr.attr, 5108 &ctor_attr.attr, 5109 &aliases_attr.attr, 5110 &align_attr.attr, 5111 &hwcache_align_attr.attr, 5112 &reclaim_account_attr.attr, 5113 &destroy_by_rcu_attr.attr, 5114 &shrink_attr.attr, 5115 &reserved_attr.attr, 5116 &slabs_cpu_partial_attr.attr, 5117#ifdef CONFIG_SLUB_DEBUG 5118 &total_objects_attr.attr, 5119 &slabs_attr.attr, 5120 &sanity_checks_attr.attr, 5121 &trace_attr.attr, 5122 &red_zone_attr.attr, 5123 &poison_attr.attr, 5124 &store_user_attr.attr, 5125 &validate_attr.attr, 5126 &alloc_calls_attr.attr, 5127 &free_calls_attr.attr, 5128#endif 5129#ifdef CONFIG_ZONE_DMA 5130 &cache_dma_attr.attr, 5131#endif 5132#ifdef CONFIG_NUMA 5133 &remote_node_defrag_ratio_attr.attr, 5134#endif 5135#ifdef CONFIG_SLUB_STATS 5136 &alloc_fastpath_attr.attr, 5137 &alloc_slowpath_attr.attr, 5138 &free_fastpath_attr.attr, 5139 &free_slowpath_attr.attr, 5140 &free_frozen_attr.attr, 5141 &free_add_partial_attr.attr, 5142 &free_remove_partial_attr.attr, 5143 &alloc_from_partial_attr.attr, 5144 &alloc_slab_attr.attr, 5145 &alloc_refill_attr.attr, 5146 &alloc_node_mismatch_attr.attr, 5147 &free_slab_attr.attr, 5148 &cpuslab_flush_attr.attr, 5149 &deactivate_full_attr.attr, 5150 &deactivate_empty_attr.attr, 5151 &deactivate_to_head_attr.attr, 5152 &deactivate_to_tail_attr.attr, 5153 &deactivate_remote_frees_attr.attr, 5154 &deactivate_bypass_attr.attr, 5155 &order_fallback_attr.attr, 5156 &cmpxchg_double_fail_attr.attr, 5157 &cmpxchg_double_cpu_fail_attr.attr, 5158 &cpu_partial_alloc_attr.attr, 5159 &cpu_partial_free_attr.attr, 5160#endif 5161#ifdef CONFIG_FAILSLAB 5162 &failslab_attr.attr, 5163#endif 5164 5165 NULL 5166}; 5167 5168static struct attribute_group slab_attr_group = { 5169 .attrs = slab_attrs, 5170}; 5171 5172static ssize_t slab_attr_show(struct kobject *kobj, 5173 struct attribute *attr, 5174 char *buf) 5175{ 5176 struct slab_attribute *attribute; 5177 struct kmem_cache *s; 5178 int err; 5179 5180 attribute = to_slab_attr(attr); 5181 s = to_slab(kobj); 5182 5183 if (!attribute->show) 5184 return -EIO; 5185 5186 err = attribute->show(s, buf); 5187 5188 return err; 5189} 5190 5191static ssize_t slab_attr_store(struct kobject *kobj, 5192 struct attribute *attr, 5193 const char *buf, size_t len) 5194{ 5195 struct slab_attribute *attribute; 5196 struct kmem_cache *s; 5197 int err; 5198 5199 attribute = to_slab_attr(attr); 5200 s = to_slab(kobj); 5201 5202 if (!attribute->store) 5203 return -EIO; 5204 5205 err = attribute->store(s, buf, len); 5206 5207 return err; 5208} 5209 5210static void kmem_cache_release(struct kobject *kobj) 5211{ 5212 struct kmem_cache *s = to_slab(kobj); 5213 5214 kfree(s->name); 5215 kfree(s); 5216} 5217 5218static const struct sysfs_ops slab_sysfs_ops = { 5219 .show = slab_attr_show, 5220 .store = slab_attr_store, 5221}; 5222 
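/*
 * Editor's illustrative sketch (not part of the original source): the
 * slab_attribute / SLAB_ATTR_RO machinery above is all that is needed to
 * expose a new per-cache sysfs file.  A hypothetical read-only attribute
 * reporting the bytes consumed by one completely full slab could look like
 * the fragment below; the name "slab_footprint" is invented for
 * illustration and would also need an &slab_footprint_attr.attr entry in
 * slab_attrs[].
 */
#if 0	/* example only, never compiled */
static ssize_t slab_footprint_show(struct kmem_cache *s, char *buf)
{
	/* object size including metadata times the objects per slab */
	return sprintf(buf, "%lu\n",
			(unsigned long)s->size * oo_objects(s->oo));
}
SLAB_ATTR_RO(slab_footprint);
#endif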
5223static struct kobj_type slab_ktype = { 5224 .sysfs_ops = &slab_sysfs_ops, 5225 .release = kmem_cache_release 5226}; 5227 5228static int uevent_filter(struct kset *kset, struct kobject *kobj) 5229{ 5230 struct kobj_type *ktype = get_ktype(kobj); 5231 5232 if (ktype == &slab_ktype) 5233 return 1; 5234 return 0; 5235} 5236 5237static const struct kset_uevent_ops slab_uevent_ops = { 5238 .filter = uevent_filter, 5239}; 5240 5241static struct kset *slab_kset; 5242 5243#define ID_STR_LENGTH 64 5244 5245/* Create a unique string id for a slab cache: 5246 * 5247 * Format :[flags-]size 5248 */ 5249static char *create_unique_id(struct kmem_cache *s) 5250{ 5251 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5252 char *p = name; 5253 5254 BUG_ON(!name); 5255 5256 *p++ = ':'; 5257 /* 5258 * First flags affecting slabcache operations. We will only 5259 * get here for aliasable slabs so we do not need to support 5260 * too many flags. The flags here must cover all flags that 5261 * are matched during merging to guarantee that the id is 5262 * unique. 5263 */ 5264 if (s->flags & SLAB_CACHE_DMA) 5265 *p++ = 'd'; 5266 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5267 *p++ = 'a'; 5268 if (s->flags & SLAB_DEBUG_FREE) 5269 *p++ = 'F'; 5270 if (!(s->flags & SLAB_NOTRACK)) 5271 *p++ = 't'; 5272 if (p != name + 1) 5273 *p++ = '-'; 5274 p += sprintf(p, "%07d", s->size); 5275 BUG_ON(p > name + ID_STR_LENGTH - 1); 5276 return name; 5277} 5278 5279static int sysfs_slab_add(struct kmem_cache *s) 5280{ 5281 int err; 5282 const char *name; 5283 int unmergeable; 5284 5285 if (slab_state < SYSFS) 5286 /* Defer until later */ 5287 return 0; 5288 5289 unmergeable = slab_unmergeable(s); 5290 if (unmergeable) { 5291 /* 5292 * Slabcache can never be merged so we can use the name proper. 5293 * This is typically the case for debug situations. In that 5294 * case we can catch duplicate names easily. 5295 */ 5296 sysfs_remove_link(&slab_kset->kobj, s->name); 5297 name = s->name; 5298 } else { 5299 /* 5300 * Create a unique name for the slab as a target 5301 * for the symlinks. 5302 */ 5303 name = create_unique_id(s); 5304 } 5305 5306 s->kobj.kset = slab_kset; 5307 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name); 5308 if (err) { 5309 kobject_put(&s->kobj); 5310 return err; 5311 } 5312 5313 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5314 if (err) { 5315 kobject_del(&s->kobj); 5316 kobject_put(&s->kobj); 5317 return err; 5318 } 5319 kobject_uevent(&s->kobj, KOBJ_ADD); 5320 if (!unmergeable) { 5321 /* Setup first alias */ 5322 sysfs_slab_alias(s, s->name); 5323 kfree(name); 5324 } 5325 return 0; 5326} 5327 5328static void sysfs_slab_remove(struct kmem_cache *s) 5329{ 5330 if (slab_state < SYSFS) 5331 /* 5332 * Sysfs has not been setup yet so no need to remove the 5333 * cache from sysfs. 5334 */ 5335 return; 5336 5337 kobject_uevent(&s->kobj, KOBJ_REMOVE); 5338 kobject_del(&s->kobj); 5339 kobject_put(&s->kobj); 5340} 5341 5342/* 5343 * Need to buffer aliases during bootup until sysfs becomes 5344 * available lest we lose that information. 5345 */ 5346struct saved_alias { 5347 struct kmem_cache *s; 5348 const char *name; 5349 struct saved_alias *next; 5350}; 5351 5352static struct saved_alias *alias_list; 5353 5354static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5355{ 5356 struct saved_alias *al; 5357 5358 if (slab_state == SYSFS) { 5359 /* 5360 * If we have a leftover link then remove it. 
5361 */ 5362 sysfs_remove_link(&slab_kset->kobj, name); 5363 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5364 } 5365 5366 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5367 if (!al) 5368 return -ENOMEM; 5369 5370 al->s = s; 5371 al->name = name; 5372 al->next = alias_list; 5373 alias_list = al; 5374 return 0; 5375} 5376 5377static int __init slab_sysfs_init(void) 5378{ 5379 struct kmem_cache *s; 5380 int err; 5381 5382 down_write(&slub_lock); 5383 5384 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj); 5385 if (!slab_kset) { 5386 up_write(&slub_lock); 5387 printk(KERN_ERR "Cannot register slab subsystem.\n"); 5388 return -ENOSYS; 5389 } 5390 5391 slab_state = SYSFS; 5392 5393 list_for_each_entry(s, &slab_caches, list) { 5394 err = sysfs_slab_add(s); 5395 if (err) 5396 printk(KERN_ERR "SLUB: Unable to add boot slab %s" 5397 " to sysfs\n", s->name); 5398 } 5399 5400 while (alias_list) { 5401 struct saved_alias *al = alias_list; 5402 5403 alias_list = alias_list->next; 5404 err = sysfs_slab_alias(al->s, al->name); 5405 if (err) 5406 printk(KERN_ERR "SLUB: Unable to add boot slab alias" 5407 " %s to sysfs\n", al->name); 5408 kfree(al); 5409 } 5410 5411 up_write(&slub_lock); 5412 resiliency_test(); 5413 return 0; 5414} 5415 5416__initcall(slab_sysfs_init); 5417#endif /* CONFIG_SYSFS */ 5418 5419/* 5420 * The /proc/slabinfo ABI 5421 */ 5422#ifdef CONFIG_SLABINFO 5423static void print_slabinfo_header(struct seq_file *m) 5424{ 5425 seq_puts(m, "slabinfo - version: 2.1\n"); 5426 seq_puts(m, "# name <active_objs> <num_objs> <objsize> " 5427 "<objperslab> <pagesperslab>"); 5428 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 5429 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 5430 seq_putc(m, '\n'); 5431} 5432 5433static void *s_start(struct seq_file *m, loff_t *pos) 5434{ 5435 loff_t n = *pos; 5436 5437 down_read(&slub_lock); 5438 if (!n) 5439 print_slabinfo_header(m); 5440 5441 return seq_list_start(&slab_caches, *pos); 5442} 5443 5444static void *s_next(struct seq_file *m, void *p, loff_t *pos) 5445{ 5446 return seq_list_next(p, &slab_caches, pos); 5447} 5448 5449static void s_stop(struct seq_file *m, void *p) 5450{ 5451 up_read(&slub_lock); 5452} 5453 5454static int s_show(struct seq_file *m, void *p) 5455{ 5456 unsigned long nr_partials = 0; 5457 unsigned long nr_slabs = 0; 5458 unsigned long nr_inuse = 0; 5459 unsigned long nr_objs = 0; 5460 unsigned long nr_free = 0; 5461 struct kmem_cache *s; 5462 int node; 5463 5464 s = list_entry(p, struct kmem_cache, list); 5465 5466 for_each_online_node(node) { 5467 struct kmem_cache_node *n = get_node(s, node); 5468 5469 if (!n) 5470 continue; 5471 5472 nr_partials += n->nr_partial; 5473 nr_slabs += atomic_long_read(&n->nr_slabs); 5474 nr_objs += atomic_long_read(&n->total_objects); 5475 nr_free += count_partial(n, count_free); 5476 } 5477 5478 nr_inuse = nr_objs - nr_free; 5479 5480 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse, 5481 nr_objs, s->size, oo_objects(s->oo), 5482 (1 << oo_order(s->oo))); 5483 seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0); 5484 seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs, 5485 0UL); 5486 seq_putc(m, '\n'); 5487 return 0; 5488} 5489 5490static const struct seq_operations slabinfo_op = { 5491 .start = s_start, 5492 .next = s_next, 5493 .stop = s_stop, 5494 .show = s_show, 5495}; 5496 5497static int slabinfo_open(struct inode *inode, struct file *file) 5498{ 5499 return seq_open(file, &slabinfo_op); 5500}
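/*
 * Editor's note (illustrative, not part of the original source): the
 * seq_file core drives slabinfo_op roughly as sketched below.  s_start()
 * takes slub_lock for reading and emits the two header lines only while
 * *pos is still zero, so a traversal starting at offset 0 prints the
 * header exactly once.
 */
#if 0	/* simplified sketch of the seq_file read loop, never compiled */
	void *p = s_start(m, &pos);		/* down_read(&slub_lock) */

	while (p) {
		s_show(m, p);			/* one line per kmem_cache */
		p = s_next(m, p, &pos);
	}
	s_stop(m, p);				/* up_read(&slub_lock) */
#endif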
5501 5502static const struct file_operations proc_slabinfo_operations = { 5503 .open = slabinfo_open, 5504 .read = seq_read, 5505 .llseek = seq_lseek, 5506 .release = seq_release, 5507}; 5508 5509static int __init slab_proc_init(void) 5510{ 5511 proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations); 5512 return 0; 5513} 5514module_init(slab_proc_init); 5515#endif /* CONFIG_SLABINFO */ 5516
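/*
 * Editor's appendix (illustrative only, not part of slub.c): the caches
 * registered with the "slab" kset above appear under
 * /sys/kernel/slab/<name>/.  A minimal userspace sketch that asks SLUB to
 * validate a cache by writing '1' to its "validate" file (handled by
 * validate_store() above, CONFIG_SLUB_DEBUG only); the cache name
 * "kmalloc-64" is only an assumption for the example.
 */
#if 0	/* userspace program, shown here for reference only */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/kmalloc-64/validate";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputc('1', f);	/* triggers validate_slab_cache() for the cache */
	return fclose(f) ? 1 : 0;
}
#endif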