slub.c revision a5dd5c117cbf620378d693963ffc42239297fac4
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DEBUG_FREE)

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
152 */ 153#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 154 155/* 156 * Set of flags that will prevent slab merging 157 */ 158#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 159 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 160 SLAB_FAILSLAB) 161 162#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 163 SLAB_CACHE_DMA | SLAB_NOTRACK) 164 165#define OO_SHIFT 16 166#define OO_MASK ((1 << OO_SHIFT) - 1) 167#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */ 168 169/* Internal SLUB flags */ 170#define __OBJECT_POISON 0x80000000UL /* Poison object */ 171 172static int kmem_size = sizeof(struct kmem_cache); 173 174#ifdef CONFIG_SMP 175static struct notifier_block slab_notifier; 176#endif 177 178static enum { 179 DOWN, /* No slab functionality available */ 180 PARTIAL, /* Kmem_cache_node works */ 181 UP, /* Everything works but does not show up in sysfs */ 182 SYSFS /* Sysfs up */ 183} slab_state = DOWN; 184 185/* A list of all slab caches on the system */ 186static DECLARE_RWSEM(slub_lock); 187static LIST_HEAD(slab_caches); 188 189/* 190 * Tracking user of a slab. 191 */ 192struct track { 193 unsigned long addr; /* Called from address */ 194 int cpu; /* Was running on cpu */ 195 int pid; /* Pid context */ 196 unsigned long when; /* When did the operation occur */ 197}; 198 199enum track_item { TRACK_ALLOC, TRACK_FREE }; 200 201#ifdef CONFIG_SLUB_DEBUG 202static int sysfs_slab_add(struct kmem_cache *); 203static int sysfs_slab_alias(struct kmem_cache *, const char *); 204static void sysfs_slab_remove(struct kmem_cache *); 205 206#else 207static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 208static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 209 { return 0; } 210static inline void sysfs_slab_remove(struct kmem_cache *s) 211{ 212 kfree(s->name); 213 kfree(s); 214} 215 216#endif 217 218static inline void stat(struct kmem_cache *s, enum stat_item si) 219{ 220#ifdef CONFIG_SLUB_STATS 221 __this_cpu_inc(s->cpu_slab->stat[si]); 222#endif 223} 224 225/******************************************************************** 226 * Core slab cache functions 227 *******************************************************************/ 228 229int slab_is_available(void) 230{ 231 return slab_state >= UP; 232} 233 234static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 235{ 236 return s->node[node]; 237} 238 239/* Verify that a pointer has an address that is valid within a slab page */ 240static inline int check_valid_pointer(struct kmem_cache *s, 241 struct page *page, const void *object) 242{ 243 void *base; 244 245 if (!object) 246 return 1; 247 248 base = page_address(page); 249 if (object < base || object >= base + page->objects * s->size || 250 (object - base) % s->size) { 251 return 0; 252 } 253 254 return 1; 255} 256 257static inline void *get_freepointer(struct kmem_cache *s, void *object) 258{ 259 return *(void **)(object + s->offset); 260} 261 262static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 263{ 264 *(void **)(object + s->offset) = fp; 265} 266 267/* Loop over all objects in a slab */ 268#define for_each_object(__p, __s, __addr, __objects) \ 269 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 270 __p += (__s)->size) 271 272/* Scan freelist */ 273#define for_each_free_object(__p, __s, __free) \ 274 for (__p = (__free); __p; __p = get_freepointer((__s), __p)) 275 276/* Determine object index from a 
given position */ 277static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 278{ 279 return (p - addr) / s->size; 280} 281 282static inline struct kmem_cache_order_objects oo_make(int order, 283 unsigned long size) 284{ 285 struct kmem_cache_order_objects x = { 286 (order << OO_SHIFT) + (PAGE_SIZE << order) / size 287 }; 288 289 return x; 290} 291 292static inline int oo_order(struct kmem_cache_order_objects x) 293{ 294 return x.x >> OO_SHIFT; 295} 296 297static inline int oo_objects(struct kmem_cache_order_objects x) 298{ 299 return x.x & OO_MASK; 300} 301 302#ifdef CONFIG_SLUB_DEBUG 303/* 304 * Debug settings: 305 */ 306#ifdef CONFIG_SLUB_DEBUG_ON 307static int slub_debug = DEBUG_DEFAULT_FLAGS; 308#else 309static int slub_debug; 310#endif 311 312static char *slub_debug_slabs; 313static int disable_higher_order_debug; 314 315/* 316 * Object debugging 317 */ 318static void print_section(char *text, u8 *addr, unsigned int length) 319{ 320 int i, offset; 321 int newline = 1; 322 char ascii[17]; 323 324 ascii[16] = 0; 325 326 for (i = 0; i < length; i++) { 327 if (newline) { 328 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 329 newline = 0; 330 } 331 printk(KERN_CONT " %02x", addr[i]); 332 offset = i % 16; 333 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.'; 334 if (offset == 15) { 335 printk(KERN_CONT " %s\n", ascii); 336 newline = 1; 337 } 338 } 339 if (!newline) { 340 i %= 16; 341 while (i < 16) { 342 printk(KERN_CONT " "); 343 ascii[i] = ' '; 344 i++; 345 } 346 printk(KERN_CONT " %s\n", ascii); 347 } 348} 349 350static struct track *get_track(struct kmem_cache *s, void *object, 351 enum track_item alloc) 352{ 353 struct track *p; 354 355 if (s->offset) 356 p = object + s->offset + sizeof(void *); 357 else 358 p = object + s->inuse; 359 360 return p + alloc; 361} 362 363static void set_track(struct kmem_cache *s, void *object, 364 enum track_item alloc, unsigned long addr) 365{ 366 struct track *p = get_track(s, object, alloc); 367 368 if (addr) { 369 p->addr = addr; 370 p->cpu = smp_processor_id(); 371 p->pid = current->pid; 372 p->when = jiffies; 373 } else 374 memset(p, 0, sizeof(struct track)); 375} 376 377static void init_tracking(struct kmem_cache *s, void *object) 378{ 379 if (!(s->flags & SLAB_STORE_USER)) 380 return; 381 382 set_track(s, object, TRACK_FREE, 0UL); 383 set_track(s, object, TRACK_ALLOC, 0UL); 384} 385 386static void print_track(const char *s, struct track *t) 387{ 388 if (!t->addr) 389 return; 390 391 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 392 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); 393} 394 395static void print_tracking(struct kmem_cache *s, void *object) 396{ 397 if (!(s->flags & SLAB_STORE_USER)) 398 return; 399 400 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 401 print_track("Freed", get_track(s, object, TRACK_FREE)); 402} 403 404static void print_page_info(struct page *page) 405{ 406 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 407 page, page->objects, page->inuse, page->freelist, page->flags); 408 409} 410 411static void slab_bug(struct kmem_cache *s, char *fmt, ...) 
412{ 413 va_list args; 414 char buf[100]; 415 416 va_start(args, fmt); 417 vsnprintf(buf, sizeof(buf), fmt, args); 418 va_end(args); 419 printk(KERN_ERR "========================================" 420 "=====================================\n"); 421 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 422 printk(KERN_ERR "----------------------------------------" 423 "-------------------------------------\n\n"); 424} 425 426static void slab_fix(struct kmem_cache *s, char *fmt, ...) 427{ 428 va_list args; 429 char buf[100]; 430 431 va_start(args, fmt); 432 vsnprintf(buf, sizeof(buf), fmt, args); 433 va_end(args); 434 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 435} 436 437static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 438{ 439 unsigned int off; /* Offset of last byte */ 440 u8 *addr = page_address(page); 441 442 print_tracking(s, p); 443 444 print_page_info(page); 445 446 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 447 p, p - addr, get_freepointer(s, p)); 448 449 if (p > addr + 16) 450 print_section("Bytes b4", p - 16, 16); 451 452 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE)); 453 454 if (s->flags & SLAB_RED_ZONE) 455 print_section("Redzone", p + s->objsize, 456 s->inuse - s->objsize); 457 458 if (s->offset) 459 off = s->offset + sizeof(void *); 460 else 461 off = s->inuse; 462 463 if (s->flags & SLAB_STORE_USER) 464 off += 2 * sizeof(struct track); 465 466 if (off != s->size) 467 /* Beginning of the filler is the free pointer */ 468 print_section("Padding", p + off, s->size - off); 469 470 dump_stack(); 471} 472 473static void object_err(struct kmem_cache *s, struct page *page, 474 u8 *object, char *reason) 475{ 476 slab_bug(s, "%s", reason); 477 print_trailer(s, page, object); 478} 479 480static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 481{ 482 va_list args; 483 char buf[100]; 484 485 va_start(args, fmt); 486 vsnprintf(buf, sizeof(buf), fmt, args); 487 va_end(args); 488 slab_bug(s, "%s", buf); 489 print_page_info(page); 490 dump_stack(); 491} 492 493static void init_object(struct kmem_cache *s, void *object, u8 val) 494{ 495 u8 *p = object; 496 497 if (s->flags & __OBJECT_POISON) { 498 memset(p, POISON_FREE, s->objsize - 1); 499 p[s->objsize - 1] = POISON_END; 500 } 501 502 if (s->flags & SLAB_RED_ZONE) 503 memset(p + s->objsize, val, s->inuse - s->objsize); 504} 505 506static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 507{ 508 while (bytes) { 509 if (*start != (u8)value) 510 return start; 511 start++; 512 bytes--; 513 } 514 return NULL; 515} 516 517static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 518 void *from, void *to) 519{ 520 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 521 memset(from, data, to - from); 522} 523 524static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 525 u8 *object, char *what, 526 u8 *start, unsigned int value, unsigned int bytes) 527{ 528 u8 *fault; 529 u8 *end; 530 531 fault = check_bytes(start, value, bytes); 532 if (!fault) 533 return 1; 534 535 end = start + bytes; 536 while (end > fault && end[-1] == value) 537 end--; 538 539 slab_bug(s, "%s overwritten", what); 540 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", 541 fault, end - 1, fault[0], value); 542 print_trailer(s, page, object); 543 544 restore_bytes(s, what, value, fault, end); 545 return 0; 546} 547 548/* 549 * Object layout: 550 * 551 * object address 552 * Bytes of the object to be managed. 
553 * If the freepointer may overlay the object then the free 554 * pointer is the first word of the object. 555 * 556 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 557 * 0xa5 (POISON_END) 558 * 559 * object + s->objsize 560 * Padding to reach word boundary. This is also used for Redzoning. 561 * Padding is extended by another word if Redzoning is enabled and 562 * objsize == inuse. 563 * 564 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 565 * 0xcc (RED_ACTIVE) for objects in use. 566 * 567 * object + s->inuse 568 * Meta data starts here. 569 * 570 * A. Free pointer (if we cannot overwrite object on free) 571 * B. Tracking data for SLAB_STORE_USER 572 * C. Padding to reach required alignment boundary or at mininum 573 * one word if debugging is on to be able to detect writes 574 * before the word boundary. 575 * 576 * Padding is done using 0x5a (POISON_INUSE) 577 * 578 * object + s->size 579 * Nothing is used beyond s->size. 580 * 581 * If slabcaches are merged then the objsize and inuse boundaries are mostly 582 * ignored. And therefore no slab options that rely on these boundaries 583 * may be used with merged slabcaches. 584 */ 585 586static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 587{ 588 unsigned long off = s->inuse; /* The end of info */ 589 590 if (s->offset) 591 /* Freepointer is placed after the object. */ 592 off += sizeof(void *); 593 594 if (s->flags & SLAB_STORE_USER) 595 /* We also have user information there */ 596 off += 2 * sizeof(struct track); 597 598 if (s->size == off) 599 return 1; 600 601 return check_bytes_and_report(s, page, p, "Object padding", 602 p + off, POISON_INUSE, s->size - off); 603} 604 605/* Check the pad bytes at the end of a slab page */ 606static int slab_pad_check(struct kmem_cache *s, struct page *page) 607{ 608 u8 *start; 609 u8 *fault; 610 u8 *end; 611 int length; 612 int remainder; 613 614 if (!(s->flags & SLAB_POISON)) 615 return 1; 616 617 start = page_address(page); 618 length = (PAGE_SIZE << compound_order(page)); 619 end = start + length; 620 remainder = length % s->size; 621 if (!remainder) 622 return 1; 623 624 fault = check_bytes(end - remainder, POISON_INUSE, remainder); 625 if (!fault) 626 return 1; 627 while (end > fault && end[-1] == POISON_INUSE) 628 end--; 629 630 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 631 print_section("Padding", end - remainder, remainder); 632 633 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); 634 return 0; 635} 636 637static int check_object(struct kmem_cache *s, struct page *page, 638 void *object, u8 val) 639{ 640 u8 *p = object; 641 u8 *endobject = object + s->objsize; 642 643 if (s->flags & SLAB_RED_ZONE) { 644 if (!check_bytes_and_report(s, page, object, "Redzone", 645 endobject, val, s->inuse - s->objsize)) 646 return 0; 647 } else { 648 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { 649 check_bytes_and_report(s, page, p, "Alignment padding", 650 endobject, POISON_INUSE, s->inuse - s->objsize); 651 } 652 } 653 654 if (s->flags & SLAB_POISON) { 655 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 656 (!check_bytes_and_report(s, page, p, "Poison", p, 657 POISON_FREE, s->objsize - 1) || 658 !check_bytes_and_report(s, page, p, "Poison", 659 p + s->objsize - 1, POISON_END, 1))) 660 return 0; 661 /* 662 * check_pad_bytes cleans up on its own. 663 */ 664 check_pad_bytes(s, page, p); 665 } 666 667 if (!s->offset && val == SLUB_RED_ACTIVE) 668 /* 669 * Object and freepointer overlap. 
Cannot check 670 * freepointer while object is allocated. 671 */ 672 return 1; 673 674 /* Check free pointer validity */ 675 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 676 object_err(s, page, p, "Freepointer corrupt"); 677 /* 678 * No choice but to zap it and thus lose the remainder 679 * of the free objects in this slab. May cause 680 * another error because the object count is now wrong. 681 */ 682 set_freepointer(s, p, NULL); 683 return 0; 684 } 685 return 1; 686} 687 688static int check_slab(struct kmem_cache *s, struct page *page) 689{ 690 int maxobj; 691 692 VM_BUG_ON(!irqs_disabled()); 693 694 if (!PageSlab(page)) { 695 slab_err(s, page, "Not a valid slab page"); 696 return 0; 697 } 698 699 maxobj = (PAGE_SIZE << compound_order(page)) / s->size; 700 if (page->objects > maxobj) { 701 slab_err(s, page, "objects %u > max %u", 702 s->name, page->objects, maxobj); 703 return 0; 704 } 705 if (page->inuse > page->objects) { 706 slab_err(s, page, "inuse %u > max %u", 707 s->name, page->inuse, page->objects); 708 return 0; 709 } 710 /* Slab_pad_check fixes things up after itself */ 711 slab_pad_check(s, page); 712 return 1; 713} 714 715/* 716 * Determine if a certain object on a page is on the freelist. Must hold the 717 * slab lock to guarantee that the chains are in a consistent state. 718 */ 719static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 720{ 721 int nr = 0; 722 void *fp = page->freelist; 723 void *object = NULL; 724 unsigned long max_objects; 725 726 while (fp && nr <= page->objects) { 727 if (fp == search) 728 return 1; 729 if (!check_valid_pointer(s, page, fp)) { 730 if (object) { 731 object_err(s, page, object, 732 "Freechain corrupt"); 733 set_freepointer(s, object, NULL); 734 break; 735 } else { 736 slab_err(s, page, "Freepointer corrupt"); 737 page->freelist = NULL; 738 page->inuse = page->objects; 739 slab_fix(s, "Freelist cleared"); 740 return 0; 741 } 742 break; 743 } 744 object = fp; 745 fp = get_freepointer(s, object); 746 nr++; 747 } 748 749 max_objects = (PAGE_SIZE << compound_order(page)) / s->size; 750 if (max_objects > MAX_OBJS_PER_PAGE) 751 max_objects = MAX_OBJS_PER_PAGE; 752 753 if (page->objects != max_objects) { 754 slab_err(s, page, "Wrong number of objects. Found %d but " 755 "should be %d", page->objects, max_objects); 756 page->objects = max_objects; 757 slab_fix(s, "Number of objects adjusted."); 758 } 759 if (page->inuse != page->objects - nr) { 760 slab_err(s, page, "Wrong object count. Counter is %d but " 761 "counted were %d", page->inuse, page->objects - nr); 762 page->inuse = page->objects - nr; 763 slab_fix(s, "Object count adjusted."); 764 } 765 return search == NULL; 766} 767 768static void trace(struct kmem_cache *s, struct page *page, void *object, 769 int alloc) 770{ 771 if (s->flags & SLAB_TRACE) { 772 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 773 s->name, 774 alloc ? "alloc" : "free", 775 object, page->inuse, 776 page->freelist); 777 778 if (!alloc) 779 print_section("Object", (void *)object, s->objsize); 780 781 dump_stack(); 782 } 783} 784 785/* 786 * Hooks for other subsystems that check memory allocations. In a typical 787 * production configuration these hooks all should produce no code at all. 
788 */ 789static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 790{ 791 flags &= gfp_allowed_mask; 792 lockdep_trace_alloc(flags); 793 might_sleep_if(flags & __GFP_WAIT); 794 795 return should_failslab(s->objsize, flags, s->flags); 796} 797 798static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) 799{ 800 flags &= gfp_allowed_mask; 801 kmemcheck_slab_alloc(s, flags, object, s->objsize); 802 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags); 803} 804 805static inline void slab_free_hook(struct kmem_cache *s, void *x) 806{ 807 kmemleak_free_recursive(x, s->flags); 808} 809 810static inline void slab_free_hook_irq(struct kmem_cache *s, void *object) 811{ 812 kmemcheck_slab_free(s, object, s->objsize); 813 debug_check_no_locks_freed(object, s->objsize); 814 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 815 debug_check_no_obj_freed(object, s->objsize); 816} 817 818/* 819 * Tracking of fully allocated slabs for debugging purposes. 820 */ 821static void add_full(struct kmem_cache_node *n, struct page *page) 822{ 823 spin_lock(&n->list_lock); 824 list_add(&page->lru, &n->full); 825 spin_unlock(&n->list_lock); 826} 827 828static void remove_full(struct kmem_cache *s, struct page *page) 829{ 830 struct kmem_cache_node *n; 831 832 if (!(s->flags & SLAB_STORE_USER)) 833 return; 834 835 n = get_node(s, page_to_nid(page)); 836 837 spin_lock(&n->list_lock); 838 list_del(&page->lru); 839 spin_unlock(&n->list_lock); 840} 841 842/* Tracking of the number of slabs for debugging purposes */ 843static inline unsigned long slabs_node(struct kmem_cache *s, int node) 844{ 845 struct kmem_cache_node *n = get_node(s, node); 846 847 return atomic_long_read(&n->nr_slabs); 848} 849 850static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 851{ 852 return atomic_long_read(&n->nr_slabs); 853} 854 855static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 856{ 857 struct kmem_cache_node *n = get_node(s, node); 858 859 /* 860 * May be called early in order to allocate a slab for the 861 * kmem_cache_node structure. Solve the chicken-egg 862 * dilemma by deferring the increment of the count during 863 * bootstrap (see early_kmem_cache_node_alloc). 
864 */ 865 if (n) { 866 atomic_long_inc(&n->nr_slabs); 867 atomic_long_add(objects, &n->total_objects); 868 } 869} 870static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 871{ 872 struct kmem_cache_node *n = get_node(s, node); 873 874 atomic_long_dec(&n->nr_slabs); 875 atomic_long_sub(objects, &n->total_objects); 876} 877 878/* Object debug checks for alloc/free paths */ 879static void setup_object_debug(struct kmem_cache *s, struct page *page, 880 void *object) 881{ 882 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 883 return; 884 885 init_object(s, object, SLUB_RED_INACTIVE); 886 init_tracking(s, object); 887} 888 889static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page, 890 void *object, unsigned long addr) 891{ 892 if (!check_slab(s, page)) 893 goto bad; 894 895 if (!on_freelist(s, page, object)) { 896 object_err(s, page, object, "Object already allocated"); 897 goto bad; 898 } 899 900 if (!check_valid_pointer(s, page, object)) { 901 object_err(s, page, object, "Freelist Pointer check fails"); 902 goto bad; 903 } 904 905 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 906 goto bad; 907 908 /* Success perform special debug activities for allocs */ 909 if (s->flags & SLAB_STORE_USER) 910 set_track(s, object, TRACK_ALLOC, addr); 911 trace(s, page, object, 1); 912 init_object(s, object, SLUB_RED_ACTIVE); 913 return 1; 914 915bad: 916 if (PageSlab(page)) { 917 /* 918 * If this is a slab page then lets do the best we can 919 * to avoid issues in the future. Marking all objects 920 * as used avoids touching the remaining objects. 921 */ 922 slab_fix(s, "Marking all objects used"); 923 page->inuse = page->objects; 924 page->freelist = NULL; 925 } 926 return 0; 927} 928 929static noinline int free_debug_processing(struct kmem_cache *s, 930 struct page *page, void *object, unsigned long addr) 931{ 932 if (!check_slab(s, page)) 933 goto fail; 934 935 if (!check_valid_pointer(s, page, object)) { 936 slab_err(s, page, "Invalid object pointer 0x%p", object); 937 goto fail; 938 } 939 940 if (on_freelist(s, page, object)) { 941 object_err(s, page, object, "Object already free"); 942 goto fail; 943 } 944 945 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 946 return 0; 947 948 if (unlikely(s != page->slab)) { 949 if (!PageSlab(page)) { 950 slab_err(s, page, "Attempt to free object(0x%p) " 951 "outside of slab", object); 952 } else if (!page->slab) { 953 printk(KERN_ERR 954 "SLUB <none>: no slab for object 0x%p.\n", 955 object); 956 dump_stack(); 957 } else 958 object_err(s, page, object, 959 "page slab pointer corrupt."); 960 goto fail; 961 } 962 963 /* Special debug activities for freeing objects */ 964 if (!PageSlubFrozen(page) && !page->freelist) 965 remove_full(s, page); 966 if (s->flags & SLAB_STORE_USER) 967 set_track(s, object, TRACK_FREE, addr); 968 trace(s, page, object, 0); 969 init_object(s, object, SLUB_RED_INACTIVE); 970 return 1; 971 972fail: 973 slab_fix(s, "Object at 0x%p not freed", object); 974 return 0; 975} 976 977static int __init setup_slub_debug(char *str) 978{ 979 slub_debug = DEBUG_DEFAULT_FLAGS; 980 if (*str++ != '=' || !*str) 981 /* 982 * No options specified. Switch on full debugging. 983 */ 984 goto out; 985 986 if (*str == ',') 987 /* 988 * No options but restriction on slabs. This means full 989 * debugging for slabs matching a pattern. 
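		 * For example, booting with slub_debug=,dentry applies the
		 * default debug flags only to caches whose name begins with
		 * "dentry", while slub_debug=FZ (parsed below) enables only
		 * sanity checks and red zoning for all caches.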
990 */ 991 goto check_slabs; 992 993 if (tolower(*str) == 'o') { 994 /* 995 * Avoid enabling debugging on caches if its minimum order 996 * would increase as a result. 997 */ 998 disable_higher_order_debug = 1; 999 goto out; 1000 } 1001 1002 slub_debug = 0; 1003 if (*str == '-') 1004 /* 1005 * Switch off all debugging measures. 1006 */ 1007 goto out; 1008 1009 /* 1010 * Determine which debug features should be switched on 1011 */ 1012 for (; *str && *str != ','; str++) { 1013 switch (tolower(*str)) { 1014 case 'f': 1015 slub_debug |= SLAB_DEBUG_FREE; 1016 break; 1017 case 'z': 1018 slub_debug |= SLAB_RED_ZONE; 1019 break; 1020 case 'p': 1021 slub_debug |= SLAB_POISON; 1022 break; 1023 case 'u': 1024 slub_debug |= SLAB_STORE_USER; 1025 break; 1026 case 't': 1027 slub_debug |= SLAB_TRACE; 1028 break; 1029 case 'a': 1030 slub_debug |= SLAB_FAILSLAB; 1031 break; 1032 default: 1033 printk(KERN_ERR "slub_debug option '%c' " 1034 "unknown. skipped\n", *str); 1035 } 1036 } 1037 1038check_slabs: 1039 if (*str == ',') 1040 slub_debug_slabs = str + 1; 1041out: 1042 return 1; 1043} 1044 1045__setup("slub_debug", setup_slub_debug); 1046 1047static unsigned long kmem_cache_flags(unsigned long objsize, 1048 unsigned long flags, const char *name, 1049 void (*ctor)(void *)) 1050{ 1051 /* 1052 * Enable debugging if selected on the kernel commandline. 1053 */ 1054 if (slub_debug && (!slub_debug_slabs || 1055 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))) 1056 flags |= slub_debug; 1057 1058 return flags; 1059} 1060#else 1061static inline void setup_object_debug(struct kmem_cache *s, 1062 struct page *page, void *object) {} 1063 1064static inline int alloc_debug_processing(struct kmem_cache *s, 1065 struct page *page, void *object, unsigned long addr) { return 0; } 1066 1067static inline int free_debug_processing(struct kmem_cache *s, 1068 struct page *page, void *object, unsigned long addr) { return 0; } 1069 1070static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1071 { return 1; } 1072static inline int check_object(struct kmem_cache *s, struct page *page, 1073 void *object, u8 val) { return 1; } 1074static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1075static inline unsigned long kmem_cache_flags(unsigned long objsize, 1076 unsigned long flags, const char *name, 1077 void (*ctor)(void *)) 1078{ 1079 return flags; 1080} 1081#define slub_debug 0 1082 1083#define disable_higher_order_debug 0 1084 1085static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1086 { return 0; } 1087static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1088 { return 0; } 1089static inline void inc_slabs_node(struct kmem_cache *s, int node, 1090 int objects) {} 1091static inline void dec_slabs_node(struct kmem_cache *s, int node, 1092 int objects) {} 1093 1094static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1095 { return 0; } 1096 1097static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1098 void *object) {} 1099 1100static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1101 1102static inline void slab_free_hook_irq(struct kmem_cache *s, 1103 void *object) {} 1104 1105#endif 1106 1107/* 1108 * Slab allocation and freeing 1109 */ 1110static inline struct page *alloc_slab_page(gfp_t flags, int node, 1111 struct kmem_cache_order_objects oo) 1112{ 1113 int order = oo_order(oo); 1114 1115 flags |= __GFP_NOTRACK; 1116 1117 if (node == NUMA_NO_NODE) 1118 return alloc_pages(flags, order); 
1119 else 1120 return alloc_pages_exact_node(node, flags, order); 1121} 1122 1123static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1124{ 1125 struct page *page; 1126 struct kmem_cache_order_objects oo = s->oo; 1127 gfp_t alloc_gfp; 1128 1129 flags |= s->allocflags; 1130 1131 /* 1132 * Let the initial higher-order allocation fail under memory pressure 1133 * so we fall-back to the minimum order allocation. 1134 */ 1135 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1136 1137 page = alloc_slab_page(alloc_gfp, node, oo); 1138 if (unlikely(!page)) { 1139 oo = s->min; 1140 /* 1141 * Allocation may have failed due to fragmentation. 1142 * Try a lower order alloc if possible 1143 */ 1144 page = alloc_slab_page(flags, node, oo); 1145 if (!page) 1146 return NULL; 1147 1148 stat(s, ORDER_FALLBACK); 1149 } 1150 1151 if (kmemcheck_enabled 1152 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { 1153 int pages = 1 << oo_order(oo); 1154 1155 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node); 1156 1157 /* 1158 * Objects from caches that have a constructor don't get 1159 * cleared when they're allocated, so we need to do it here. 1160 */ 1161 if (s->ctor) 1162 kmemcheck_mark_uninitialized_pages(page, pages); 1163 else 1164 kmemcheck_mark_unallocated_pages(page, pages); 1165 } 1166 1167 page->objects = oo_objects(oo); 1168 mod_zone_page_state(page_zone(page), 1169 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 1170 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1171 1 << oo_order(oo)); 1172 1173 return page; 1174} 1175 1176static void setup_object(struct kmem_cache *s, struct page *page, 1177 void *object) 1178{ 1179 setup_object_debug(s, page, object); 1180 if (unlikely(s->ctor)) 1181 s->ctor(object); 1182} 1183 1184static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1185{ 1186 struct page *page; 1187 void *start; 1188 void *last; 1189 void *p; 1190 1191 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1192 1193 page = allocate_slab(s, 1194 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1195 if (!page) 1196 goto out; 1197 1198 inc_slabs_node(s, page_to_nid(page), page->objects); 1199 page->slab = s; 1200 page->flags |= 1 << PG_slab; 1201 1202 start = page_address(page); 1203 1204 if (unlikely(s->flags & SLAB_POISON)) 1205 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1206 1207 last = start; 1208 for_each_object(p, s, start, page->objects) { 1209 setup_object(s, page, last); 1210 set_freepointer(s, last, p); 1211 last = p; 1212 } 1213 setup_object(s, page, last); 1214 set_freepointer(s, last, NULL); 1215 1216 page->freelist = start; 1217 page->inuse = 0; 1218out: 1219 return page; 1220} 1221 1222static void __free_slab(struct kmem_cache *s, struct page *page) 1223{ 1224 int order = compound_order(page); 1225 int pages = 1 << order; 1226 1227 if (kmem_cache_debug(s)) { 1228 void *p; 1229 1230 slab_pad_check(s, page); 1231 for_each_object(p, s, page_address(page), 1232 page->objects) 1233 check_object(s, page, p, SLUB_RED_INACTIVE); 1234 } 1235 1236 kmemcheck_free_shadow(page, compound_order(page)); 1237 1238 mod_zone_page_state(page_zone(page), 1239 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1240 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1241 -pages); 1242 1243 __ClearPageSlab(page); 1244 reset_page_mapcount(page); 1245 if (current->reclaim_state) 1246 current->reclaim_state->reclaimed_slab += pages; 1247 __free_pages(page, order); 1248} 1249 1250static void rcu_free_slab(struct rcu_head *h) 1251{ 1252 struct page *page; 1253 1254 page = container_of((struct list_head *)h, struct page, lru); 1255 __free_slab(page->slab, page); 1256} 1257 1258static void free_slab(struct kmem_cache *s, struct page *page) 1259{ 1260 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1261 /* 1262 * RCU free overloads the RCU head over the LRU 1263 */ 1264 struct rcu_head *head = (void *)&page->lru; 1265 1266 call_rcu(head, rcu_free_slab); 1267 } else 1268 __free_slab(s, page); 1269} 1270 1271static void discard_slab(struct kmem_cache *s, struct page *page) 1272{ 1273 dec_slabs_node(s, page_to_nid(page), page->objects); 1274 free_slab(s, page); 1275} 1276 1277/* 1278 * Per slab locking using the pagelock 1279 */ 1280static __always_inline void slab_lock(struct page *page) 1281{ 1282 bit_spin_lock(PG_locked, &page->flags); 1283} 1284 1285static __always_inline void slab_unlock(struct page *page) 1286{ 1287 __bit_spin_unlock(PG_locked, &page->flags); 1288} 1289 1290static __always_inline int slab_trylock(struct page *page) 1291{ 1292 int rc = 1; 1293 1294 rc = bit_spin_trylock(PG_locked, &page->flags); 1295 return rc; 1296} 1297 1298/* 1299 * Management of partially allocated slabs 1300 */ 1301static void add_partial(struct kmem_cache_node *n, 1302 struct page *page, int tail) 1303{ 1304 spin_lock(&n->list_lock); 1305 n->nr_partial++; 1306 if (tail) 1307 list_add_tail(&page->lru, &n->partial); 1308 else 1309 list_add(&page->lru, &n->partial); 1310 spin_unlock(&n->list_lock); 1311} 1312 1313static inline void __remove_partial(struct kmem_cache_node *n, 1314 struct page *page) 1315{ 1316 list_del(&page->lru); 1317 n->nr_partial--; 1318} 1319 1320static void remove_partial(struct kmem_cache *s, struct page *page) 1321{ 1322 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1323 1324 spin_lock(&n->list_lock); 1325 __remove_partial(n, page); 1326 spin_unlock(&n->list_lock); 1327} 1328 1329/* 1330 * Lock slab and remove from the partial list. 1331 * 1332 * Must hold list_lock. 1333 */ 1334static inline int lock_and_freeze_slab(struct kmem_cache_node *n, 1335 struct page *page) 1336{ 1337 if (slab_trylock(page)) { 1338 __remove_partial(n, page); 1339 __SetPageSlubFrozen(page); 1340 return 1; 1341 } 1342 return 0; 1343} 1344 1345/* 1346 * Try to allocate a partial slab from a specific node. 1347 */ 1348static struct page *get_partial_node(struct kmem_cache_node *n) 1349{ 1350 struct page *page; 1351 1352 /* 1353 * Racy check. If we mistakenly see no partial slabs then we 1354 * just allocate an empty slab. If we mistakenly try to get a 1355 * partial slab and there is none available then get_partials() 1356 * will return NULL. 1357 */ 1358 if (!n || !n->nr_partial) 1359 return NULL; 1360 1361 spin_lock(&n->list_lock); 1362 list_for_each_entry(page, &n->partial, lru) 1363 if (lock_and_freeze_slab(n, page)) 1364 goto out; 1365 page = NULL; 1366out: 1367 spin_unlock(&n->list_lock); 1368 return page; 1369} 1370 1371/* 1372 * Get a page from somewhere. Search in increasing NUMA distances. 
1373 */ 1374static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1375{ 1376#ifdef CONFIG_NUMA 1377 struct zonelist *zonelist; 1378 struct zoneref *z; 1379 struct zone *zone; 1380 enum zone_type high_zoneidx = gfp_zone(flags); 1381 struct page *page; 1382 1383 /* 1384 * The defrag ratio allows a configuration of the tradeoffs between 1385 * inter node defragmentation and node local allocations. A lower 1386 * defrag_ratio increases the tendency to do local allocations 1387 * instead of attempting to obtain partial slabs from other nodes. 1388 * 1389 * If the defrag_ratio is set to 0 then kmalloc() always 1390 * returns node local objects. If the ratio is higher then kmalloc() 1391 * may return off node objects because partial slabs are obtained 1392 * from other nodes and filled up. 1393 * 1394 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1395 * defrag_ratio = 1000) then every (well almost) allocation will 1396 * first attempt to defrag slab caches on other nodes. This means 1397 * scanning over all nodes to look for partial slabs which may be 1398 * expensive if we do it every time we are trying to find a slab 1399 * with available objects. 1400 */ 1401 if (!s->remote_node_defrag_ratio || 1402 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1403 return NULL; 1404 1405 get_mems_allowed(); 1406 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1407 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1408 struct kmem_cache_node *n; 1409 1410 n = get_node(s, zone_to_nid(zone)); 1411 1412 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1413 n->nr_partial > s->min_partial) { 1414 page = get_partial_node(n); 1415 if (page) { 1416 put_mems_allowed(); 1417 return page; 1418 } 1419 } 1420 } 1421 put_mems_allowed(); 1422#endif 1423 return NULL; 1424} 1425 1426/* 1427 * Get a partial page, lock it and return it. 1428 */ 1429static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1430{ 1431 struct page *page; 1432 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node; 1433 1434 page = get_partial_node(get_node(s, searchnode)); 1435 if (page || node != -1) 1436 return page; 1437 1438 return get_any_partial(s, flags); 1439} 1440 1441/* 1442 * Move a page back to the lists. 1443 * 1444 * Must be called with the slab lock held. 1445 * 1446 * On exit the slab lock will have been dropped. 1447 */ 1448static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1449{ 1450 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1451 1452 __ClearPageSlubFrozen(page); 1453 if (page->inuse) { 1454 1455 if (page->freelist) { 1456 add_partial(n, page, tail); 1457 stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1458 } else { 1459 stat(s, DEACTIVATE_FULL); 1460 if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) 1461 add_full(n, page); 1462 } 1463 slab_unlock(page); 1464 } else { 1465 stat(s, DEACTIVATE_EMPTY); 1466 if (n->nr_partial < s->min_partial) { 1467 /* 1468 * Adding an empty slab to the partial slabs in order 1469 * to avoid page allocator overhead. This slab needs 1470 * to come after the other slabs with objects in 1471 * so that the others get filled first. That way the 1472 * size of the partial list stays small. 1473 * 1474 * kmem_cache_shrink can reclaim any empty slabs from 1475 * the partial list. 
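			 * As a rough illustration: set_min_partial() clamps
			 * s->min_partial between MIN_PARTIAL (5) and
			 * MAX_PARTIAL (10), so a node hangs on to an empty
			 * slab only while its partial list has fewer than
			 * s->min_partial entries; otherwise the empty slab is
			 * discarded in the branch below.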
1476 */ 1477 add_partial(n, page, 1); 1478 slab_unlock(page); 1479 } else { 1480 slab_unlock(page); 1481 stat(s, FREE_SLAB); 1482 discard_slab(s, page); 1483 } 1484 } 1485} 1486 1487/* 1488 * Remove the cpu slab 1489 */ 1490static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1491{ 1492 struct page *page = c->page; 1493 int tail = 1; 1494 1495 if (page->freelist) 1496 stat(s, DEACTIVATE_REMOTE_FREES); 1497 /* 1498 * Merge cpu freelist into slab freelist. Typically we get here 1499 * because both freelists are empty. So this is unlikely 1500 * to occur. 1501 */ 1502 while (unlikely(c->freelist)) { 1503 void **object; 1504 1505 tail = 0; /* Hot objects. Put the slab first */ 1506 1507 /* Retrieve object from cpu_freelist */ 1508 object = c->freelist; 1509 c->freelist = get_freepointer(s, c->freelist); 1510 1511 /* And put onto the regular freelist */ 1512 set_freepointer(s, object, page->freelist); 1513 page->freelist = object; 1514 page->inuse--; 1515 } 1516 c->page = NULL; 1517 unfreeze_slab(s, page, tail); 1518} 1519 1520static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1521{ 1522 stat(s, CPUSLAB_FLUSH); 1523 slab_lock(c->page); 1524 deactivate_slab(s, c); 1525} 1526 1527/* 1528 * Flush cpu slab. 1529 * 1530 * Called from IPI handler with interrupts disabled. 1531 */ 1532static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1533{ 1534 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 1535 1536 if (likely(c && c->page)) 1537 flush_slab(s, c); 1538} 1539 1540static void flush_cpu_slab(void *d) 1541{ 1542 struct kmem_cache *s = d; 1543 1544 __flush_cpu_slab(s, smp_processor_id()); 1545} 1546 1547static void flush_all(struct kmem_cache *s) 1548{ 1549 on_each_cpu(flush_cpu_slab, s, 1); 1550} 1551 1552/* 1553 * Check if the objects in a per cpu structure fit numa 1554 * locality expectations. 
1555 */ 1556static inline int node_match(struct kmem_cache_cpu *c, int node) 1557{ 1558#ifdef CONFIG_NUMA 1559 if (node != NUMA_NO_NODE && c->node != node) 1560 return 0; 1561#endif 1562 return 1; 1563} 1564 1565static int count_free(struct page *page) 1566{ 1567 return page->objects - page->inuse; 1568} 1569 1570static unsigned long count_partial(struct kmem_cache_node *n, 1571 int (*get_count)(struct page *)) 1572{ 1573 unsigned long flags; 1574 unsigned long x = 0; 1575 struct page *page; 1576 1577 spin_lock_irqsave(&n->list_lock, flags); 1578 list_for_each_entry(page, &n->partial, lru) 1579 x += get_count(page); 1580 spin_unlock_irqrestore(&n->list_lock, flags); 1581 return x; 1582} 1583 1584static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 1585{ 1586#ifdef CONFIG_SLUB_DEBUG 1587 return atomic_long_read(&n->total_objects); 1588#else 1589 return 0; 1590#endif 1591} 1592 1593static noinline void 1594slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 1595{ 1596 int node; 1597 1598 printk(KERN_WARNING 1599 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n", 1600 nid, gfpflags); 1601 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, " 1602 "default order: %d, min order: %d\n", s->name, s->objsize, 1603 s->size, oo_order(s->oo), oo_order(s->min)); 1604 1605 if (oo_order(s->min) > get_order(s->objsize)) 1606 printk(KERN_WARNING " %s debugging increased min order, use " 1607 "slub_debug=O to disable.\n", s->name); 1608 1609 for_each_online_node(node) { 1610 struct kmem_cache_node *n = get_node(s, node); 1611 unsigned long nr_slabs; 1612 unsigned long nr_objs; 1613 unsigned long nr_free; 1614 1615 if (!n) 1616 continue; 1617 1618 nr_free = count_partial(n, count_free); 1619 nr_slabs = node_nr_slabs(n); 1620 nr_objs = node_nr_objs(n); 1621 1622 printk(KERN_WARNING 1623 " node %d: slabs: %ld, objs: %ld, free: %ld\n", 1624 node, nr_slabs, nr_objs, nr_free); 1625 } 1626} 1627 1628/* 1629 * Slow path. The lockless freelist is empty or we need to perform 1630 * debugging duties. 1631 * 1632 * Interrupts are disabled. 1633 * 1634 * Processing is still very fast if new objects have been freed to the 1635 * regular freelist. In that case we simply take over the regular freelist 1636 * as the lockless freelist and zap the regular freelist. 1637 * 1638 * If that is not working then we fall back to the partial lists. We take the 1639 * first element of the freelist as the object to allocate now and move the 1640 * rest of the freelist to the lockless freelist. 1641 * 1642 * And if we were unable to get a new slab from the partial slab lists then 1643 * we need to allocate a new slab. This is the slowest path since it involves 1644 * a call to the page allocator and the setup of a new slab. 
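 * As a rough map to the statistics in __slab_alloc() below (when
 * CONFIG_SLUB_STATS is enabled): the refill-from-cpu-slab case is counted
 * as ALLOC_REFILL, the partial list case as ALLOC_FROM_PARTIAL and the
 * new slab case as ALLOC_SLAB.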
1645 */ 1646static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 1647 unsigned long addr, struct kmem_cache_cpu *c) 1648{ 1649 void **object; 1650 struct page *new; 1651 1652 /* We handle __GFP_ZERO in the caller */ 1653 gfpflags &= ~__GFP_ZERO; 1654 1655 if (!c->page) 1656 goto new_slab; 1657 1658 slab_lock(c->page); 1659 if (unlikely(!node_match(c, node))) 1660 goto another_slab; 1661 1662 stat(s, ALLOC_REFILL); 1663 1664load_freelist: 1665 object = c->page->freelist; 1666 if (unlikely(!object)) 1667 goto another_slab; 1668 if (kmem_cache_debug(s)) 1669 goto debug; 1670 1671 c->freelist = get_freepointer(s, object); 1672 c->page->inuse = c->page->objects; 1673 c->page->freelist = NULL; 1674 c->node = page_to_nid(c->page); 1675unlock_out: 1676 slab_unlock(c->page); 1677 stat(s, ALLOC_SLOWPATH); 1678 return object; 1679 1680another_slab: 1681 deactivate_slab(s, c); 1682 1683new_slab: 1684 new = get_partial(s, gfpflags, node); 1685 if (new) { 1686 c->page = new; 1687 stat(s, ALLOC_FROM_PARTIAL); 1688 goto load_freelist; 1689 } 1690 1691 gfpflags &= gfp_allowed_mask; 1692 if (gfpflags & __GFP_WAIT) 1693 local_irq_enable(); 1694 1695 new = new_slab(s, gfpflags, node); 1696 1697 if (gfpflags & __GFP_WAIT) 1698 local_irq_disable(); 1699 1700 if (new) { 1701 c = __this_cpu_ptr(s->cpu_slab); 1702 stat(s, ALLOC_SLAB); 1703 if (c->page) 1704 flush_slab(s, c); 1705 slab_lock(new); 1706 __SetPageSlubFrozen(new); 1707 c->page = new; 1708 goto load_freelist; 1709 } 1710 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) 1711 slab_out_of_memory(s, gfpflags, node); 1712 return NULL; 1713debug: 1714 if (!alloc_debug_processing(s, c->page, object, addr)) 1715 goto another_slab; 1716 1717 c->page->inuse++; 1718 c->page->freelist = get_freepointer(s, object); 1719 c->node = -1; 1720 goto unlock_out; 1721} 1722 1723/* 1724 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 1725 * have the fastpath folded into their functions. So no function call 1726 * overhead for requests that can be satisfied on the fastpath. 1727 * 1728 * The fastpath works by first checking if the lockless freelist can be used. 1729 * If not then __slab_alloc is called for slow processing. 1730 * 1731 * Otherwise we can simply pick the next object from the lockless free list. 
1732 */ 1733static __always_inline void *slab_alloc(struct kmem_cache *s, 1734 gfp_t gfpflags, int node, unsigned long addr) 1735{ 1736 void **object; 1737 struct kmem_cache_cpu *c; 1738 unsigned long flags; 1739 1740 if (slab_pre_alloc_hook(s, gfpflags)) 1741 return NULL; 1742 1743 local_irq_save(flags); 1744 c = __this_cpu_ptr(s->cpu_slab); 1745 object = c->freelist; 1746 if (unlikely(!object || !node_match(c, node))) 1747 1748 object = __slab_alloc(s, gfpflags, node, addr, c); 1749 1750 else { 1751 c->freelist = get_freepointer(s, object); 1752 stat(s, ALLOC_FASTPATH); 1753 } 1754 local_irq_restore(flags); 1755 1756 if (unlikely(gfpflags & __GFP_ZERO) && object) 1757 memset(object, 0, s->objsize); 1758 1759 slab_post_alloc_hook(s, gfpflags, object); 1760 1761 return object; 1762} 1763 1764void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 1765{ 1766 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); 1767 1768 trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags); 1769 1770 return ret; 1771} 1772EXPORT_SYMBOL(kmem_cache_alloc); 1773 1774#ifdef CONFIG_TRACING 1775void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) 1776{ 1777 return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); 1778} 1779EXPORT_SYMBOL(kmem_cache_alloc_notrace); 1780#endif 1781 1782#ifdef CONFIG_NUMA 1783void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 1784{ 1785 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); 1786 1787 trace_kmem_cache_alloc_node(_RET_IP_, ret, 1788 s->objsize, s->size, gfpflags, node); 1789 1790 return ret; 1791} 1792EXPORT_SYMBOL(kmem_cache_alloc_node); 1793#endif 1794 1795#ifdef CONFIG_TRACING 1796void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, 1797 gfp_t gfpflags, 1798 int node) 1799{ 1800 return slab_alloc(s, gfpflags, node, _RET_IP_); 1801} 1802EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); 1803#endif 1804 1805/* 1806 * Slow patch handling. This may still be called frequently since objects 1807 * have a longer lifetime than the cpu slabs in most processing loads. 1808 * 1809 * So we still attempt to reduce cache line usage. Just take the slab 1810 * lock and free the item. If there is no additional partial page 1811 * handling required then we can return immediately. 1812 */ 1813static void __slab_free(struct kmem_cache *s, struct page *page, 1814 void *x, unsigned long addr) 1815{ 1816 void *prior; 1817 void **object = (void *)x; 1818 1819 stat(s, FREE_SLOWPATH); 1820 slab_lock(page); 1821 1822 if (kmem_cache_debug(s)) 1823 goto debug; 1824 1825checks_ok: 1826 prior = page->freelist; 1827 set_freepointer(s, object, prior); 1828 page->freelist = object; 1829 page->inuse--; 1830 1831 if (unlikely(PageSlubFrozen(page))) { 1832 stat(s, FREE_FROZEN); 1833 goto out_unlock; 1834 } 1835 1836 if (unlikely(!page->inuse)) 1837 goto slab_empty; 1838 1839 /* 1840 * Objects left in the slab. If it was not on the partial list before 1841 * then add it. 1842 */ 1843 if (unlikely(!prior)) { 1844 add_partial(get_node(s, page_to_nid(page)), page, 1); 1845 stat(s, FREE_ADD_PARTIAL); 1846 } 1847 1848out_unlock: 1849 slab_unlock(page); 1850 return; 1851 1852slab_empty: 1853 if (prior) { 1854 /* 1855 * Slab still on the partial list. 
		 */
		remove_partial(s, page);
		stat(s, FREE_REMOVE_PARTIAL);
	}
	slab_unlock(page);
	stat(s, FREE_SLAB);
	discard_slab(s, page);
	return;

debug:
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	goto checks_ok;
}

/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
			struct page *page, void *x, unsigned long addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	slab_free_hook(s, x);

	local_irq_save(flags);
	c = __this_cpu_ptr(s->cpu_slab);

	slab_free_hook_irq(s, x);

	if (likely(page == c->page && c->node >= 0)) {
		set_freepointer(s, object, c->freelist);
		c->freelist = object;
		stat(s, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr);

	local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page *page;

	page = virt_to_head_page(x);

	slab_free(s, page, x, _RET_IP_);

	trace_kmem_cache_free(_RET_IP_, x);
}
EXPORT_SYMBOL(kmem_cache_free);

/* Figure out on which slab page the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;

	return page;
}

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor always has one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
		return get_order(size * MAX_OBJS_PER_PAGE) - 1;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;

	}

	return order;
}

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;
	int max_objects;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
	 *
	 * First we reduce the acceptable waste in a slab. Then
	 * we reduce the minimum objects required in a slab.
	 */
	min_objects = slub_min_objects;
	if (!min_objects)
		min_objects = 4 * (fls(nr_cpu_ids) + 1);
	max_objects = (PAGE_SIZE << slub_max_order)/size;
	min_objects = min(min_objects, max_objects);

	while (min_objects > 1) {
		fraction = 16;
		while (fraction >= 4) {
			order = slab_order(size, min_objects,
						slub_max_order, fraction);
			if (order <= slub_max_order)
				return order;
			fraction /= 2;
		}
		min_objects--;
	}

	/*
	 * We were unable to place multiple objects in a slab. Now
	 * let's see if we can place a single object there.
	 */
	order = slab_order(size, 1, slub_max_order, 1);
	if (order <= slub_max_order)
		return order;

	/*
	 * Doh this slab cannot be placed using slub_max_order.
	 */
	order = slab_order(size, 1, MAX_ORDER, 1);
	if (order < MAX_ORDER)
		return order;
	return -ENOSYS;
}

/*
 * Figure out what the alignment of the objects will be.
 */
static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
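	 * As a worked example (assuming a 64 byte cache line): for a
	 * 20 byte object, ralign below is halved from 64 to 32 (since
	 * 20 <= 32 but 20 > 16), so SLAB_HWCACHE_ALIGN yields 32 byte
	 * alignment rather than a full cache line.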
2074 */ 2075 if (flags & SLAB_HWCACHE_ALIGN) { 2076 unsigned long ralign = cache_line_size(); 2077 while (size <= ralign / 2) 2078 ralign /= 2; 2079 align = max(align, ralign); 2080 } 2081 2082 if (align < ARCH_SLAB_MINALIGN) 2083 align = ARCH_SLAB_MINALIGN; 2084 2085 return ALIGN(align, sizeof(void *)); 2086} 2087 2088static void 2089init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 2090{ 2091 n->nr_partial = 0; 2092 spin_lock_init(&n->list_lock); 2093 INIT_LIST_HEAD(&n->partial); 2094#ifdef CONFIG_SLUB_DEBUG 2095 atomic_long_set(&n->nr_slabs, 0); 2096 atomic_long_set(&n->total_objects, 0); 2097 INIT_LIST_HEAD(&n->full); 2098#endif 2099} 2100 2101static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 2102{ 2103 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2104 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2105 2106 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); 2107 2108 return s->cpu_slab != NULL; 2109} 2110 2111static struct kmem_cache *kmem_cache_node; 2112 2113/* 2114 * No kmalloc_node yet so do it by hand. We know that this is the first 2115 * slab on the node for this slabcache. There are no concurrent accesses 2116 * possible. 2117 * 2118 * Note that this function only works on the kmalloc_node_cache 2119 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2120 * memory on a fresh node that has no slab structures yet. 2121 */ 2122static void early_kmem_cache_node_alloc(int node) 2123{ 2124 struct page *page; 2125 struct kmem_cache_node *n; 2126 unsigned long flags; 2127 2128 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 2129 2130 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 2131 2132 BUG_ON(!page); 2133 if (page_to_nid(page) != node) { 2134 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2135 "node %d\n", node); 2136 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2137 "in order to be able to continue\n"); 2138 } 2139 2140 n = page->freelist; 2141 BUG_ON(!n); 2142 page->freelist = get_freepointer(kmem_cache_node, n); 2143 page->inuse++; 2144 kmem_cache_node->node[node] = n; 2145#ifdef CONFIG_SLUB_DEBUG 2146 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 2147 init_tracking(kmem_cache_node, n); 2148#endif 2149 init_kmem_cache_node(n, kmem_cache_node); 2150 inc_slabs_node(kmem_cache_node, node, page->objects); 2151 2152 /* 2153 * lockdep requires consistent irq usage for each lock 2154 * so even though there cannot be a race this early in 2155 * the boot sequence, we still disable irqs. 
2156 */ 2157 local_irq_save(flags); 2158 add_partial(n, page, 0); 2159 local_irq_restore(flags); 2160} 2161 2162static void free_kmem_cache_nodes(struct kmem_cache *s) 2163{ 2164 int node; 2165 2166 for_each_node_state(node, N_NORMAL_MEMORY) { 2167 struct kmem_cache_node *n = s->node[node]; 2168 2169 if (n) 2170 kmem_cache_free(kmem_cache_node, n); 2171 2172 s->node[node] = NULL; 2173 } 2174} 2175 2176static int init_kmem_cache_nodes(struct kmem_cache *s) 2177{ 2178 int node; 2179 2180 for_each_node_state(node, N_NORMAL_MEMORY) { 2181 struct kmem_cache_node *n; 2182 2183 if (slab_state == DOWN) { 2184 early_kmem_cache_node_alloc(node); 2185 continue; 2186 } 2187 n = kmem_cache_alloc_node(kmem_cache_node, 2188 GFP_KERNEL, node); 2189 2190 if (!n) { 2191 free_kmem_cache_nodes(s); 2192 return 0; 2193 } 2194 2195 s->node[node] = n; 2196 init_kmem_cache_node(n, s); 2197 } 2198 return 1; 2199} 2200 2201static void set_min_partial(struct kmem_cache *s, unsigned long min) 2202{ 2203 if (min < MIN_PARTIAL) 2204 min = MIN_PARTIAL; 2205 else if (min > MAX_PARTIAL) 2206 min = MAX_PARTIAL; 2207 s->min_partial = min; 2208} 2209 2210/* 2211 * calculate_sizes() determines the order and the distribution of data within 2212 * a slab object. 2213 */ 2214static int calculate_sizes(struct kmem_cache *s, int forced_order) 2215{ 2216 unsigned long flags = s->flags; 2217 unsigned long size = s->objsize; 2218 unsigned long align = s->align; 2219 int order; 2220 2221 /* 2222 * Round up object size to the next word boundary. We can only 2223 * place the free pointer at word boundaries and this determines 2224 * the possible location of the free pointer. 2225 */ 2226 size = ALIGN(size, sizeof(void *)); 2227 2228#ifdef CONFIG_SLUB_DEBUG 2229 /* 2230 * Determine if we can poison the object itself. If the user of 2231 * the slab may touch the object after free or before allocation 2232 * then we should never poison the object itself. 2233 */ 2234 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2235 !s->ctor) 2236 s->flags |= __OBJECT_POISON; 2237 else 2238 s->flags &= ~__OBJECT_POISON; 2239 2240 2241 /* 2242 * If we are Redzoning then check if there is some space between the 2243 * end of the object and the free pointer. If not then add an 2244 * additional word to have some bytes to store Redzone information. 2245 */ 2246 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2247 size += sizeof(void *); 2248#endif 2249 2250 /* 2251 * With that we have determined the number of bytes in actual use 2252 * by the object. This is the potential offset to the free pointer. 2253 */ 2254 s->inuse = size; 2255 2256 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2257 s->ctor)) { 2258 /* 2259 * Relocate free pointer after the object if it is not 2260 * permitted to overwrite the first word of the object on 2261 * kmem_cache_free. 2262 * 2263 * This is the case if we do RCU, have a constructor or 2264 * destructor or are poisoning the objects. 2265 */ 2266 s->offset = size; 2267 size += sizeof(void *); 2268 } 2269 2270#ifdef CONFIG_SLUB_DEBUG 2271 if (flags & SLAB_STORE_USER) 2272 /* 2273 * Need to store information about allocs and frees after 2274 * the object. 2275 */ 2276 size += 2 * sizeof(struct track); 2277 2278 if (flags & SLAB_RED_ZONE) 2279 /* 2280 * Add some empty padding so that we can catch 2281 * overwrites from earlier objects rather than let 2282 * tracking information or the free pointer be 2283 * corrupted if a user writes before the start 2284 * of the object. 
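 * (A sketch of the resulting layout of a fully debugged object: the
 * object itself, possibly one red zone word, the relocated free pointer
 * when the first word of the object must not be overwritten, two struct
 * track records for the last allocation and free, and finally this
 * padding word in front of the next object.)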
2285 */ 2286 size += sizeof(void *); 2287#endif 2288 2289 /* 2290 * Determine the alignment based on various parameters that the 2291 * user specified and the dynamic determination of cache line size 2292 * on bootup. 2293 */ 2294 align = calculate_alignment(flags, align, s->objsize); 2295 s->align = align; 2296 2297 /* 2298 * SLUB stores one object immediately after another beginning from 2299 * offset 0. In order to align the objects we have to simply size 2300 * each object to conform to the alignment. 2301 */ 2302 size = ALIGN(size, align); 2303 s->size = size; 2304 if (forced_order >= 0) 2305 order = forced_order; 2306 else 2307 order = calculate_order(size); 2308 2309 if (order < 0) 2310 return 0; 2311 2312 s->allocflags = 0; 2313 if (order) 2314 s->allocflags |= __GFP_COMP; 2315 2316 if (s->flags & SLAB_CACHE_DMA) 2317 s->allocflags |= SLUB_DMA; 2318 2319 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2320 s->allocflags |= __GFP_RECLAIMABLE; 2321 2322 /* 2323 * Determine the number of objects per slab 2324 */ 2325 s->oo = oo_make(order, size); 2326 s->min = oo_make(get_order(size), size); 2327 if (oo_objects(s->oo) > oo_objects(s->max)) 2328 s->max = s->oo; 2329 2330 return !!oo_objects(s->oo); 2331 2332} 2333 2334static int kmem_cache_open(struct kmem_cache *s, 2335 const char *name, size_t size, 2336 size_t align, unsigned long flags, 2337 void (*ctor)(void *)) 2338{ 2339 memset(s, 0, kmem_size); 2340 s->name = name; 2341 s->ctor = ctor; 2342 s->objsize = size; 2343 s->align = align; 2344 s->flags = kmem_cache_flags(size, flags, name, ctor); 2345 2346 if (!calculate_sizes(s, -1)) 2347 goto error; 2348 if (disable_higher_order_debug) { 2349 /* 2350 * Disable debugging flags that store metadata if the min slab 2351 * order increased. 2352 */ 2353 if (get_order(s->size) > get_order(s->objsize)) { 2354 s->flags &= ~DEBUG_METADATA_FLAGS; 2355 s->offset = 0; 2356 if (!calculate_sizes(s, -1)) 2357 goto error; 2358 } 2359 } 2360 2361 /* 2362 * The larger the object size is, the more pages we want on the partial 2363 * list to avoid pounding the page allocator excessively. 2364 */ 2365 set_min_partial(s, ilog2(s->size)); 2366 s->refcount = 1; 2367#ifdef CONFIG_NUMA 2368 s->remote_node_defrag_ratio = 1000; 2369#endif 2370 if (!init_kmem_cache_nodes(s)) 2371 goto error; 2372 2373 if (alloc_kmem_cache_cpus(s)) 2374 return 1; 2375 2376 free_kmem_cache_nodes(s); 2377error: 2378 if (flags & SLAB_PANIC) 2379 panic("Cannot create slab %s size=%lu realsize=%u " 2380 "order=%u offset=%u flags=%lx\n", 2381 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2382 s->offset, flags); 2383 return 0; 2384} 2385 2386/* 2387 * Check if a given pointer is valid 2388 */ 2389int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2390{ 2391 struct page *page; 2392 2393 if (!kern_ptr_validate(object, s->size)) 2394 return 0; 2395 2396 page = get_object_page(object); 2397 2398 if (!page || s != page->slab) 2399 /* No slab or wrong slab */ 2400 return 0; 2401 2402 if (!check_valid_pointer(s, page, object)) 2403 return 0; 2404 2405 /* 2406 * We could also check if the object is on the slabs freelist. 2407 * But this would be too expensive and it seems that the main 2408 * purpose of kmem_ptr_valid() is to check if the object belongs 2409 * to a certain slab. 
2410 */ 2411 return 1; 2412} 2413EXPORT_SYMBOL(kmem_ptr_validate); 2414 2415/* 2416 * Determine the size of a slab object 2417 */ 2418unsigned int kmem_cache_size(struct kmem_cache *s) 2419{ 2420 return s->objsize; 2421} 2422EXPORT_SYMBOL(kmem_cache_size); 2423 2424const char *kmem_cache_name(struct kmem_cache *s) 2425{ 2426 return s->name; 2427} 2428EXPORT_SYMBOL(kmem_cache_name); 2429 2430static void list_slab_objects(struct kmem_cache *s, struct page *page, 2431 const char *text) 2432{ 2433#ifdef CONFIG_SLUB_DEBUG 2434 void *addr = page_address(page); 2435 void *p; 2436 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * 2437 sizeof(long), GFP_ATOMIC); 2438 if (!map) 2439 return; 2440 slab_err(s, page, "%s", text); 2441 slab_lock(page); 2442 for_each_free_object(p, s, page->freelist) 2443 set_bit(slab_index(p, s, addr), map); 2444 2445 for_each_object(p, s, addr, page->objects) { 2446 2447 if (!test_bit(slab_index(p, s, addr), map)) { 2448 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2449 p, p - addr); 2450 print_tracking(s, p); 2451 } 2452 } 2453 slab_unlock(page); 2454 kfree(map); 2455#endif 2456} 2457 2458/* 2459 * Attempt to free all partial slabs on a node. 2460 */ 2461static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2462{ 2463 unsigned long flags; 2464 struct page *page, *h; 2465 2466 spin_lock_irqsave(&n->list_lock, flags); 2467 list_for_each_entry_safe(page, h, &n->partial, lru) { 2468 if (!page->inuse) { 2469 __remove_partial(n, page); 2470 discard_slab(s, page); 2471 } else { 2472 list_slab_objects(s, page, 2473 "Objects remaining on kmem_cache_close()"); 2474 } 2475 } 2476 spin_unlock_irqrestore(&n->list_lock, flags); 2477} 2478 2479/* 2480 * Release all resources used by a slab cache. 2481 */ 2482static inline int kmem_cache_close(struct kmem_cache *s) 2483{ 2484 int node; 2485 2486 flush_all(s); 2487 free_percpu(s->cpu_slab); 2488 /* Attempt to free all objects */ 2489 for_each_node_state(node, N_NORMAL_MEMORY) { 2490 struct kmem_cache_node *n = get_node(s, node); 2491 2492 free_partial(s, n); 2493 if (n->nr_partial || slabs_node(s, node)) 2494 return 1; 2495 } 2496 free_kmem_cache_nodes(s); 2497 return 0; 2498} 2499 2500/* 2501 * Close a cache and release the kmem_cache structure 2502 * (must be used for caches created using kmem_cache_create) 2503 */ 2504void kmem_cache_destroy(struct kmem_cache *s) 2505{ 2506 down_write(&slub_lock); 2507 s->refcount--; 2508 if (!s->refcount) { 2509 list_del(&s->list); 2510 if (kmem_cache_close(s)) { 2511 printk(KERN_ERR "SLUB %s: %s called for cache that " 2512 "still has objects.\n", s->name, __func__); 2513 dump_stack(); 2514 } 2515 if (s->flags & SLAB_DESTROY_BY_RCU) 2516 rcu_barrier(); 2517 sysfs_slab_remove(s); 2518 } 2519 up_write(&slub_lock); 2520} 2521EXPORT_SYMBOL(kmem_cache_destroy); 2522 2523/******************************************************************** 2524 * Kmalloc subsystem 2525 *******************************************************************/ 2526 2527struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; 2528EXPORT_SYMBOL(kmalloc_caches); 2529 2530static struct kmem_cache *kmem_cache; 2531 2532#ifdef CONFIG_ZONE_DMA 2533static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; 2534#endif 2535 2536static int __init setup_slub_min_order(char *str) 2537{ 2538 get_option(&str, &slub_min_order); 2539 2540 return 1; 2541} 2542 2543__setup("slub_min_order=", setup_slub_min_order); 2544 2545static int __init setup_slub_max_order(char *str) 2546{ 2547 get_option(&str, 
&slub_max_order); 2548 slub_max_order = min(slub_max_order, MAX_ORDER - 1); 2549 2550 return 1; 2551} 2552 2553__setup("slub_max_order=", setup_slub_max_order); 2554 2555static int __init setup_slub_min_objects(char *str) 2556{ 2557 get_option(&str, &slub_min_objects); 2558 2559 return 1; 2560} 2561 2562__setup("slub_min_objects=", setup_slub_min_objects); 2563 2564static int __init setup_slub_nomerge(char *str) 2565{ 2566 slub_nomerge = 1; 2567 return 1; 2568} 2569 2570__setup("slub_nomerge", setup_slub_nomerge); 2571 2572static struct kmem_cache *__init create_kmalloc_cache(const char *name, 2573 int size, unsigned int flags) 2574{ 2575 struct kmem_cache *s; 2576 2577 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 2578 2579 /* 2580 * This function is called with IRQs disabled during early-boot on 2581 * single CPU so there's no need to take slub_lock here. 2582 */ 2583 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, 2584 flags, NULL)) 2585 goto panic; 2586 2587 list_add(&s->list, &slab_caches); 2588 return s; 2589 2590panic: 2591 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2592 return NULL; 2593} 2594 2595/* 2596 * Conversion table for small slabs sizes / 8 to the index in the 2597 * kmalloc array. This is necessary for slabs < 192 since we have non power 2598 * of two cache sizes there. The size of larger slabs can be determined using 2599 * fls. 2600 */ 2601static s8 size_index[24] = { 2602 3, /* 8 */ 2603 4, /* 16 */ 2604 5, /* 24 */ 2605 5, /* 32 */ 2606 6, /* 40 */ 2607 6, /* 48 */ 2608 6, /* 56 */ 2609 6, /* 64 */ 2610 1, /* 72 */ 2611 1, /* 80 */ 2612 1, /* 88 */ 2613 1, /* 96 */ 2614 7, /* 104 */ 2615 7, /* 112 */ 2616 7, /* 120 */ 2617 7, /* 128 */ 2618 2, /* 136 */ 2619 2, /* 144 */ 2620 2, /* 152 */ 2621 2, /* 160 */ 2622 2, /* 168 */ 2623 2, /* 176 */ 2624 2, /* 184 */ 2625 2 /* 192 */ 2626}; 2627 2628static inline int size_index_elem(size_t bytes) 2629{ 2630 return (bytes - 1) / 8; 2631} 2632 2633static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2634{ 2635 int index; 2636 2637 if (size <= 192) { 2638 if (!size) 2639 return ZERO_SIZE_PTR; 2640 2641 index = size_index[size_index_elem(size)]; 2642 } else 2643 index = fls(size - 1); 2644 2645#ifdef CONFIG_ZONE_DMA 2646 if (unlikely((flags & SLUB_DMA))) 2647 return kmalloc_dma_caches[index]; 2648 2649#endif 2650 return kmalloc_caches[index]; 2651} 2652 2653void *__kmalloc(size_t size, gfp_t flags) 2654{ 2655 struct kmem_cache *s; 2656 void *ret; 2657 2658 if (unlikely(size > SLUB_MAX_SIZE)) 2659 return kmalloc_large(size, flags); 2660 2661 s = get_slab(size, flags); 2662 2663 if (unlikely(ZERO_OR_NULL_PTR(s))) 2664 return s; 2665 2666 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); 2667 2668 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 2669 2670 return ret; 2671} 2672EXPORT_SYMBOL(__kmalloc); 2673 2674static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2675{ 2676 struct page *page; 2677 void *ptr = NULL; 2678 2679 flags |= __GFP_COMP | __GFP_NOTRACK; 2680 page = alloc_pages_node(node, flags, get_order(size)); 2681 if (page) 2682 ptr = page_address(page); 2683 2684 kmemleak_alloc(ptr, size, 1, flags); 2685 return ptr; 2686} 2687 2688#ifdef CONFIG_NUMA 2689void *__kmalloc_node(size_t size, gfp_t flags, int node) 2690{ 2691 struct kmem_cache *s; 2692 void *ret; 2693 2694 if (unlikely(size > SLUB_MAX_SIZE)) { 2695 ret = kmalloc_large_node(size, flags, node); 2696 2697 trace_kmalloc_node(_RET_IP_, ret, 2698 size, PAGE_SIZE << get_order(size), 2699 flags, node); 
2700 2701 return ret; 2702 } 2703 2704 s = get_slab(size, flags); 2705 2706 if (unlikely(ZERO_OR_NULL_PTR(s))) 2707 return s; 2708 2709 ret = slab_alloc(s, flags, node, _RET_IP_); 2710 2711 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 2712 2713 return ret; 2714} 2715EXPORT_SYMBOL(__kmalloc_node); 2716#endif 2717 2718size_t ksize(const void *object) 2719{ 2720 struct page *page; 2721 struct kmem_cache *s; 2722 2723 if (unlikely(object == ZERO_SIZE_PTR)) 2724 return 0; 2725 2726 page = virt_to_head_page(object); 2727 2728 if (unlikely(!PageSlab(page))) { 2729 WARN_ON(!PageCompound(page)); 2730 return PAGE_SIZE << compound_order(page); 2731 } 2732 s = page->slab; 2733 2734#ifdef CONFIG_SLUB_DEBUG 2735 /* 2736 * Debugging requires use of the padding between object 2737 * and whatever may come after it. 2738 */ 2739 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2740 return s->objsize; 2741 2742#endif 2743 /* 2744 * If we have the need to store the freelist pointer 2745 * back there or track user information then we can 2746 * only use the space before that information. 2747 */ 2748 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2749 return s->inuse; 2750 /* 2751 * Else we can use all the padding etc for the allocation 2752 */ 2753 return s->size; 2754} 2755EXPORT_SYMBOL(ksize); 2756 2757void kfree(const void *x) 2758{ 2759 struct page *page; 2760 void *object = (void *)x; 2761 2762 trace_kfree(_RET_IP_, x); 2763 2764 if (unlikely(ZERO_OR_NULL_PTR(x))) 2765 return; 2766 2767 page = virt_to_head_page(x); 2768 if (unlikely(!PageSlab(page))) { 2769 BUG_ON(!PageCompound(page)); 2770 kmemleak_free(x); 2771 put_page(page); 2772 return; 2773 } 2774 slab_free(page->slab, page, object, _RET_IP_); 2775} 2776EXPORT_SYMBOL(kfree); 2777 2778/* 2779 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2780 * the remaining slabs by the number of items in use. The slabs with the 2781 * most items in use come first. New allocations will then fill those up 2782 * and thus they can be removed from the partial lists. 2783 * 2784 * The slabs with the least items are placed last. This results in them 2785 * being allocated from last increasing the chance that the last objects 2786 * are freed in them. 2787 */ 2788int kmem_cache_shrink(struct kmem_cache *s) 2789{ 2790 int node; 2791 int i; 2792 struct kmem_cache_node *n; 2793 struct page *page; 2794 struct page *t; 2795 int objects = oo_objects(s->max); 2796 struct list_head *slabs_by_inuse = 2797 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2798 unsigned long flags; 2799 2800 if (!slabs_by_inuse) 2801 return -ENOMEM; 2802 2803 flush_all(s); 2804 for_each_node_state(node, N_NORMAL_MEMORY) { 2805 n = get_node(s, node); 2806 2807 if (!n->nr_partial) 2808 continue; 2809 2810 for (i = 0; i < objects; i++) 2811 INIT_LIST_HEAD(slabs_by_inuse + i); 2812 2813 spin_lock_irqsave(&n->list_lock, flags); 2814 2815 /* 2816 * Build lists indexed by the items in use in each slab. 2817 * 2818 * Note that concurrent frees may occur while we hold the 2819 * list_lock. page->inuse here is the upper limit. 2820 */ 2821 list_for_each_entry_safe(page, t, &n->partial, lru) { 2822 if (!page->inuse && slab_trylock(page)) { 2823 /* 2824 * Must hold slab lock here because slab_free 2825 * may have freed the last object and be 2826 * waiting to release the slab. 
2827 */ 2828 __remove_partial(n, page); 2829 slab_unlock(page); 2830 discard_slab(s, page); 2831 } else { 2832 list_move(&page->lru, 2833 slabs_by_inuse + page->inuse); 2834 } 2835 } 2836 2837 /* 2838 * Rebuild the partial list with the slabs filled up most 2839 * first and the least used slabs at the end. 2840 */ 2841 for (i = objects - 1; i >= 0; i--) 2842 list_splice(slabs_by_inuse + i, n->partial.prev); 2843 2844 spin_unlock_irqrestore(&n->list_lock, flags); 2845 } 2846 2847 kfree(slabs_by_inuse); 2848 return 0; 2849} 2850EXPORT_SYMBOL(kmem_cache_shrink); 2851 2852#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 2853static int slab_mem_going_offline_callback(void *arg) 2854{ 2855 struct kmem_cache *s; 2856 2857 down_read(&slub_lock); 2858 list_for_each_entry(s, &slab_caches, list) 2859 kmem_cache_shrink(s); 2860 up_read(&slub_lock); 2861 2862 return 0; 2863} 2864 2865static void slab_mem_offline_callback(void *arg) 2866{ 2867 struct kmem_cache_node *n; 2868 struct kmem_cache *s; 2869 struct memory_notify *marg = arg; 2870 int offline_node; 2871 2872 offline_node = marg->status_change_nid; 2873 2874 /* 2875 * If the node still has available memory then we still need its 2876 * kmem_cache_node structure, so there is nothing to free here. 2877 */ 2878 if (offline_node < 0) 2879 return; 2880 2881 down_read(&slub_lock); 2882 list_for_each_entry(s, &slab_caches, list) { 2883 n = get_node(s, offline_node); 2884 if (n) { 2885 /* 2886 * If n->nr_slabs > 0, slabs still exist on the node 2887 * that is going down. We were unable to free them, 2888 * and the offline_pages() function should not have 2889 * called this callback. So, we must fail. 2890 */ 2891 BUG_ON(slabs_node(s, offline_node)); 2892 2893 s->node[offline_node] = NULL; 2894 kmem_cache_free(kmem_cache_node, n); 2895 } 2896 } 2897 up_read(&slub_lock); 2898} 2899 2900static int slab_mem_going_online_callback(void *arg) 2901{ 2902 struct kmem_cache_node *n; 2903 struct kmem_cache *s; 2904 struct memory_notify *marg = arg; 2905 int nid = marg->status_change_nid; 2906 int ret = 0; 2907 2908 /* 2909 * If the node's memory is already available, then kmem_cache_node is 2910 * already created. Nothing to do. 2911 */ 2912 if (nid < 0) 2913 return 0; 2914 2915 /* 2916 * We are bringing a node online. No memory is available yet. We must 2917 * allocate a kmem_cache_node structure in order to bring the node 2918 * online. 2919 */ 2920 down_read(&slub_lock); 2921 list_for_each_entry(s, &slab_caches, list) { 2922 /* 2923 * XXX: kmem_cache_alloc_node will fallback to other nodes 2924 * since memory is not yet available from the node that 2925 * is brought up.
2926 */ 2927 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 2928 if (!n) { 2929 ret = -ENOMEM; 2930 goto out; 2931 } 2932 init_kmem_cache_node(n, s); 2933 s->node[nid] = n; 2934 } 2935out: 2936 up_read(&slub_lock); 2937 return ret; 2938} 2939 2940static int slab_memory_callback(struct notifier_block *self, 2941 unsigned long action, void *arg) 2942{ 2943 int ret = 0; 2944 2945 switch (action) { 2946 case MEM_GOING_ONLINE: 2947 ret = slab_mem_going_online_callback(arg); 2948 break; 2949 case MEM_GOING_OFFLINE: 2950 ret = slab_mem_going_offline_callback(arg); 2951 break; 2952 case MEM_OFFLINE: 2953 case MEM_CANCEL_ONLINE: 2954 slab_mem_offline_callback(arg); 2955 break; 2956 case MEM_ONLINE: 2957 case MEM_CANCEL_OFFLINE: 2958 break; 2959 } 2960 if (ret) 2961 ret = notifier_from_errno(ret); 2962 else 2963 ret = NOTIFY_OK; 2964 return ret; 2965} 2966 2967#endif /* CONFIG_MEMORY_HOTPLUG */ 2968 2969/******************************************************************** 2970 * Basic setup of slabs 2971 *******************************************************************/ 2972 2973/* 2974 * Used for early kmem_cache structures that were allocated using 2975 * the page allocator. 2976 */ 2977 2978static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) 2979{ 2980 int node; 2981 2982 list_add(&s->list, &slab_caches); 2983 s->refcount = -1; 2984 2985 for_each_node_state(node, N_NORMAL_MEMORY) { 2986 struct kmem_cache_node *n = get_node(s, node); 2987 struct page *p; 2988 2989 if (n) { 2990 list_for_each_entry(p, &n->partial, lru) 2991 p->slab = s; 2992 2993#ifdef CONFIG_SLUB_DEBUG 2994 list_for_each_entry(p, &n->full, lru) 2995 p->slab = s; 2996#endif 2997 } 2998 } 2999} 3000 3001void __init kmem_cache_init(void) 3002{ 3003 int i; 3004 int caches = 0; 3005 struct kmem_cache *temp_kmem_cache; 3006 int order; 3007 struct kmem_cache *temp_kmem_cache_node; 3008 unsigned long kmalloc_size; 3009 3010 kmem_size = offsetof(struct kmem_cache, node) + 3011 nr_node_ids * sizeof(struct kmem_cache_node *); 3012 3013 /* Allocate two kmem_caches from the page allocator */ 3014 kmalloc_size = ALIGN(kmem_size, cache_line_size()); 3015 order = get_order(2 * kmalloc_size); 3016 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); 3017 3018 /* 3019 * Must first have the slab cache available for the allocations of the 3020 * struct kmem_cache_node's. There is special bootstrap code in 3021 * kmem_cache_open for slab_state == DOWN. 3022 */ 3023 kmem_cache_node = (void *)kmem_cache + kmalloc_size; 3024 3025 kmem_cache_open(kmem_cache_node, "kmem_cache_node", 3026 sizeof(struct kmem_cache_node), 3027 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3028 3029 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 3030 3031 /* Able to allocate the per node structures */ 3032 slab_state = PARTIAL; 3033 3034 temp_kmem_cache = kmem_cache; 3035 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, 3036 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3037 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3038 memcpy(kmem_cache, temp_kmem_cache, kmem_size); 3039 3040 /* 3041 * Allocate kmem_cache_node properly from the kmem_cache slab. 3042 * kmem_cache_node is separately allocated so no need to 3043 * update any list pointers.
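 * To summarize the bootstrap: both kmem_cache and kmem_cache_node start
 * out in pages taken straight from the page allocator, the final copies
 * are then allocated from the freshly opened kmem_cache, the page->slab
 * back pointers of the slabs created so far are repaired in
 * kmem_cache_bootstrap_fixup() and the temporary boot pages are freed.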
3044 */ 3045 temp_kmem_cache_node = kmem_cache_node; 3046 3047 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3048 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size); 3049 3050 kmem_cache_bootstrap_fixup(kmem_cache_node); 3051 3052 caches++; 3053 kmem_cache_bootstrap_fixup(kmem_cache); 3054 caches++; 3055 /* Free temporary boot structure */ 3056 free_pages((unsigned long)temp_kmem_cache, order); 3057 3058 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 3059 3060 /* 3061 * Patch up the size_index table if we have strange large alignment 3062 * requirements for the kmalloc array. This is only the case for 3063 * MIPS it seems. The standard arches will not generate any code here. 3064 * 3065 * Largest permitted alignment is 256 bytes due to the way we 3066 * handle the index determination for the smaller caches. 3067 * 3068 * Make sure that nothing crazy happens if someone starts tinkering 3069 * around with ARCH_KMALLOC_MINALIGN 3070 */ 3071 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3072 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3073 3074 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { 3075 int elem = size_index_elem(i); 3076 if (elem >= ARRAY_SIZE(size_index)) 3077 break; 3078 size_index[elem] = KMALLOC_SHIFT_LOW; 3079 } 3080 3081 if (KMALLOC_MIN_SIZE == 64) { 3082 /* 3083 * The 96 byte size cache is not used if the alignment 3084 * is 64 byte. 3085 */ 3086 for (i = 64 + 8; i <= 96; i += 8) 3087 size_index[size_index_elem(i)] = 7; 3088 } else if (KMALLOC_MIN_SIZE == 128) { 3089 /* 3090 * The 192 byte sized cache is not used if the alignment 3091 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3092 * instead. 3093 */ 3094 for (i = 128 + 8; i <= 192; i += 8) 3095 size_index[size_index_elem(i)] = 8; 3096 } 3097 3098 /* Caches that are not of the two-to-the-power-of size */ 3099 if (KMALLOC_MIN_SIZE <= 32) { 3100 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); 3101 caches++; 3102 } 3103 3104 if (KMALLOC_MIN_SIZE <= 64) { 3105 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); 3106 caches++; 3107 } 3108 3109 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3110 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); 3111 caches++; 3112 } 3113 3114 slab_state = UP; 3115 3116 /* Provide the correct kmalloc names now that the caches are up */ 3117 if (KMALLOC_MIN_SIZE <= 32) { 3118 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT); 3119 BUG_ON(!kmalloc_caches[1]->name); 3120 } 3121 3122 if (KMALLOC_MIN_SIZE <= 64) { 3123 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT); 3124 BUG_ON(!kmalloc_caches[2]->name); 3125 } 3126 3127 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3128 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); 3129 3130 BUG_ON(!s); 3131 kmalloc_caches[i]->name = s; 3132 } 3133 3134#ifdef CONFIG_SMP 3135 register_cpu_notifier(&slab_notifier); 3136#endif 3137 3138#ifdef CONFIG_ZONE_DMA 3139 for (i = 0; i < SLUB_PAGE_SHIFT; i++) { 3140 struct kmem_cache *s = kmalloc_caches[i]; 3141 3142 if (s && s->size) { 3143 char *name = kasprintf(GFP_NOWAIT, 3144 "dma-kmalloc-%d", s->objsize); 3145 3146 BUG_ON(!name); 3147 kmalloc_dma_caches[i] = create_kmalloc_cache(name, 3148 s->objsize, SLAB_CACHE_DMA); 3149 } 3150 } 3151#endif 3152 printk(KERN_INFO 3153 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3154 " CPUs=%d, Nodes=%d\n", 3155 caches, cache_line_size(), 3156 slub_min_order, slub_max_order, slub_min_objects, 3157 nr_cpu_ids, nr_node_ids); 
3158} 3159 3160void __init kmem_cache_init_late(void) 3161{ 3162} 3163 3164/* 3165 * Find a mergeable slab cache 3166 */ 3167static int slab_unmergeable(struct kmem_cache *s) 3168{ 3169 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3170 return 1; 3171 3172 if (s->ctor) 3173 return 1; 3174 3175 /* 3176 * We may have set a slab to be unmergeable during bootstrap. 3177 */ 3178 if (s->refcount < 0) 3179 return 1; 3180 3181 return 0; 3182} 3183 3184static struct kmem_cache *find_mergeable(size_t size, 3185 size_t align, unsigned long flags, const char *name, 3186 void (*ctor)(void *)) 3187{ 3188 struct kmem_cache *s; 3189 3190 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3191 return NULL; 3192 3193 if (ctor) 3194 return NULL; 3195 3196 size = ALIGN(size, sizeof(void *)); 3197 align = calculate_alignment(flags, align, size); 3198 size = ALIGN(size, align); 3199 flags = kmem_cache_flags(size, flags, name, NULL); 3200 3201 list_for_each_entry(s, &slab_caches, list) { 3202 if (slab_unmergeable(s)) 3203 continue; 3204 3205 if (size > s->size) 3206 continue; 3207 3208 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3209 continue; 3210 /* 3211 * Check if alignment is compatible. 3212 * Courtesy of Adrian Drzewiecki 3213 */ 3214 if ((s->size & ~(align - 1)) != s->size) 3215 continue; 3216 3217 if (s->size - size >= sizeof(void *)) 3218 continue; 3219 3220 return s; 3221 } 3222 return NULL; 3223} 3224 3225struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3226 size_t align, unsigned long flags, void (*ctor)(void *)) 3227{ 3228 struct kmem_cache *s; 3229 char *n; 3230 3231 if (WARN_ON(!name)) 3232 return NULL; 3233 3234 down_write(&slub_lock); 3235 s = find_mergeable(size, align, flags, name, ctor); 3236 if (s) { 3237 s->refcount++; 3238 /* 3239 * Adjust the object sizes so that we clear 3240 * the complete object on kzalloc. 3241 */ 3242 s->objsize = max(s->objsize, (int)size); 3243 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3244 3245 if (sysfs_slab_alias(s, name)) { 3246 s->refcount--; 3247 goto err; 3248 } 3249 up_write(&slub_lock); 3250 return s; 3251 } 3252 3253 n = kstrdup(name, GFP_KERNEL); 3254 if (!n) 3255 goto err; 3256 3257 s = kmalloc(kmem_size, GFP_KERNEL); 3258 if (s) { 3259 if (kmem_cache_open(s, n, 3260 size, align, flags, ctor)) { 3261 list_add(&s->list, &slab_caches); 3262 if (sysfs_slab_add(s)) { 3263 list_del(&s->list); 3264 kfree(n); 3265 kfree(s); 3266 goto err; 3267 } 3268 up_write(&slub_lock); 3269 return s; 3270 } 3271 kfree(n); 3272 kfree(s); 3273 } 3274 up_write(&slub_lock); 3275 3276err: 3277 if (flags & SLAB_PANIC) 3278 panic("Cannot create slabcache %s\n", name); 3279 else 3280 s = NULL; 3281 return s; 3282} 3283EXPORT_SYMBOL(kmem_cache_create); 3284 3285#ifdef CONFIG_SMP 3286/* 3287 * Use the cpu notifier to insure that the cpu slabs are flushed when 3288 * necessary. 
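 * Flushing deactivates the cpu slab of the departing processor so that
 * its partially used page goes back under control of the node lists and
 * no objects are left stranded on a cpu that is gone.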
3289 */ 3290static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3291 unsigned long action, void *hcpu) 3292{ 3293 long cpu = (long)hcpu; 3294 struct kmem_cache *s; 3295 unsigned long flags; 3296 3297 switch (action) { 3298 case CPU_UP_CANCELED: 3299 case CPU_UP_CANCELED_FROZEN: 3300 case CPU_DEAD: 3301 case CPU_DEAD_FROZEN: 3302 down_read(&slub_lock); 3303 list_for_each_entry(s, &slab_caches, list) { 3304 local_irq_save(flags); 3305 __flush_cpu_slab(s, cpu); 3306 local_irq_restore(flags); 3307 } 3308 up_read(&slub_lock); 3309 break; 3310 default: 3311 break; 3312 } 3313 return NOTIFY_OK; 3314} 3315 3316static struct notifier_block __cpuinitdata slab_notifier = { 3317 .notifier_call = slab_cpuup_callback 3318}; 3319 3320#endif 3321 3322void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 3323{ 3324 struct kmem_cache *s; 3325 void *ret; 3326 3327 if (unlikely(size > SLUB_MAX_SIZE)) 3328 return kmalloc_large(size, gfpflags); 3329 3330 s = get_slab(size, gfpflags); 3331 3332 if (unlikely(ZERO_OR_NULL_PTR(s))) 3333 return s; 3334 3335 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); 3336 3337 /* Honor the call site pointer we received. */ 3338 trace_kmalloc(caller, ret, size, s->size, gfpflags); 3339 3340 return ret; 3341} 3342 3343void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3344 int node, unsigned long caller) 3345{ 3346 struct kmem_cache *s; 3347 void *ret; 3348 3349 if (unlikely(size > SLUB_MAX_SIZE)) { 3350 ret = kmalloc_large_node(size, gfpflags, node); 3351 3352 trace_kmalloc_node(caller, ret, 3353 size, PAGE_SIZE << get_order(size), 3354 gfpflags, node); 3355 3356 return ret; 3357 } 3358 3359 s = get_slab(size, gfpflags); 3360 3361 if (unlikely(ZERO_OR_NULL_PTR(s))) 3362 return s; 3363 3364 ret = slab_alloc(s, gfpflags, node, caller); 3365 3366 /* Honor the call site pointer we received.
*/ 3367 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 3368 3369 return ret; 3370} 3371 3372#ifdef CONFIG_SLUB_DEBUG 3373static int count_inuse(struct page *page) 3374{ 3375 return page->inuse; 3376} 3377 3378static int count_total(struct page *page) 3379{ 3380 return page->objects; 3381} 3382 3383static int validate_slab(struct kmem_cache *s, struct page *page, 3384 unsigned long *map) 3385{ 3386 void *p; 3387 void *addr = page_address(page); 3388 3389 if (!check_slab(s, page) || 3390 !on_freelist(s, page, NULL)) 3391 return 0; 3392 3393 /* Now we know that a valid freelist exists */ 3394 bitmap_zero(map, page->objects); 3395 3396 for_each_free_object(p, s, page->freelist) { 3397 set_bit(slab_index(p, s, addr), map); 3398 if (!check_object(s, page, p, 0)) 3399 return 0; 3400 } 3401 3402 for_each_object(p, s, addr, page->objects) 3403 if (!test_bit(slab_index(p, s, addr), map)) 3404 if (!check_object(s, page, p, 1)) 3405 return 0; 3406 return 1; 3407} 3408 3409static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3410 unsigned long *map) 3411{ 3412 if (slab_trylock(page)) { 3413 validate_slab(s, page, map); 3414 slab_unlock(page); 3415 } else 3416 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 3417 s->name, page); 3418} 3419 3420static int validate_slab_node(struct kmem_cache *s, 3421 struct kmem_cache_node *n, unsigned long *map) 3422{ 3423 unsigned long count = 0; 3424 struct page *page; 3425 unsigned long flags; 3426 3427 spin_lock_irqsave(&n->list_lock, flags); 3428 3429 list_for_each_entry(page, &n->partial, lru) { 3430 validate_slab_slab(s, page, map); 3431 count++; 3432 } 3433 if (count != n->nr_partial) 3434 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3435 "counter=%ld\n", s->name, count, n->nr_partial); 3436 3437 if (!(s->flags & SLAB_STORE_USER)) 3438 goto out; 3439 3440 list_for_each_entry(page, &n->full, lru) { 3441 validate_slab_slab(s, page, map); 3442 count++; 3443 } 3444 if (count != atomic_long_read(&n->nr_slabs)) 3445 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3446 "counter=%ld\n", s->name, count, 3447 atomic_long_read(&n->nr_slabs)); 3448 3449out: 3450 spin_unlock_irqrestore(&n->list_lock, flags); 3451 return count; 3452} 3453 3454static long validate_slab_cache(struct kmem_cache *s) 3455{ 3456 int node; 3457 unsigned long count = 0; 3458 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3459 sizeof(unsigned long), GFP_KERNEL); 3460 3461 if (!map) 3462 return -ENOMEM; 3463 3464 flush_all(s); 3465 for_each_node_state(node, N_NORMAL_MEMORY) { 3466 struct kmem_cache_node *n = get_node(s, node); 3467 3468 count += validate_slab_node(s, n, map); 3469 } 3470 kfree(map); 3471 return count; 3472} 3473 3474#ifdef SLUB_RESILIENCY_TEST 3475static void resiliency_test(void) 3476{ 3477 u8 *p; 3478 3479 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10); 3480 3481 printk(KERN_ERR "SLUB resiliency testing\n"); 3482 printk(KERN_ERR "-----------------------\n"); 3483 printk(KERN_ERR "A. Corruption after allocation\n"); 3484 3485 p = kzalloc(16, GFP_KERNEL); 3486 p[16] = 0x12; 3487 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 3488 " 0x12->0x%p\n\n", p + 16); 3489 3490 validate_slab_cache(kmalloc_caches[4]); 3491 3492 /* Hmmm... The next two are dangerous */ 3493 p = kzalloc(32, GFP_KERNEL); 3494 p[32 + sizeof(void *)] = 0x34; 3495 printk(KERN_ERR "\n2. 
kmalloc-32: Clobber next pointer/next slab" 3496 " 0x34 -> -0x%p\n", p); 3497 printk(KERN_ERR 3498 "If allocated object is overwritten then not detectable\n\n"); 3499 3500 validate_slab_cache(kmalloc_caches[5]); 3501 p = kzalloc(64, GFP_KERNEL); 3502 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3503 *p = 0x56; 3504 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3505 p); 3506 printk(KERN_ERR 3507 "If allocated object is overwritten then not detectable\n\n"); 3508 validate_slab_cache(kmalloc_caches[6]); 3509 3510 printk(KERN_ERR "\nB. Corruption after free\n"); 3511 p = kzalloc(128, GFP_KERNEL); 3512 kfree(p); 3513 *p = 0x78; 3514 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3515 validate_slab_cache(kmalloc_caches[7]); 3516 3517 p = kzalloc(256, GFP_KERNEL); 3518 kfree(p); 3519 p[50] = 0x9a; 3520 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3521 p); 3522 validate_slab_cache(kmalloc_caches[8]); 3523 3524 p = kzalloc(512, GFP_KERNEL); 3525 kfree(p); 3526 p[512] = 0xab; 3527 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3528 validate_slab_cache(kmalloc_caches[9]); 3529} 3530#else 3531static void resiliency_test(void) {}; 3532#endif 3533 3534/* 3535 * Generate lists of code addresses where slabcache objects are allocated 3536 * and freed. 3537 */ 3538 3539struct location { 3540 unsigned long count; 3541 unsigned long addr; 3542 long long sum_time; 3543 long min_time; 3544 long max_time; 3545 long min_pid; 3546 long max_pid; 3547 DECLARE_BITMAP(cpus, NR_CPUS); 3548 nodemask_t nodes; 3549}; 3550 3551struct loc_track { 3552 unsigned long max; 3553 unsigned long count; 3554 struct location *loc; 3555}; 3556 3557static void free_loc_track(struct loc_track *t) 3558{ 3559 if (t->max) 3560 free_pages((unsigned long)t->loc, 3561 get_order(sizeof(struct location) * t->max)); 3562} 3563 3564static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3565{ 3566 struct location *l; 3567 int order; 3568 3569 order = get_order(sizeof(struct location) * max); 3570 3571 l = (void *)__get_free_pages(flags, order); 3572 if (!l) 3573 return 0; 3574 3575 if (t->count) { 3576 memcpy(l, t->loc, sizeof(struct location) * t->count); 3577 free_loc_track(t); 3578 } 3579 t->max = max; 3580 t->loc = l; 3581 return 1; 3582} 3583 3584static int add_location(struct loc_track *t, struct kmem_cache *s, 3585 const struct track *track) 3586{ 3587 long start, end, pos; 3588 struct location *l; 3589 unsigned long caddr; 3590 unsigned long age = jiffies - track->when; 3591 3592 start = -1; 3593 end = t->count; 3594 3595 for ( ; ; ) { 3596 pos = start + (end - start + 1) / 2; 3597 3598 /* 3599 * There is nothing at "end". If we end up there 3600 * we need to add something to before end. 3601 */ 3602 if (pos == end) 3603 break; 3604 3605 caddr = t->loc[pos].addr; 3606 if (track->addr == caddr) { 3607 3608 l = &t->loc[pos]; 3609 l->count++; 3610 if (track->when) { 3611 l->sum_time += age; 3612 if (age < l->min_time) 3613 l->min_time = age; 3614 if (age > l->max_time) 3615 l->max_time = age; 3616 3617 if (track->pid < l->min_pid) 3618 l->min_pid = track->pid; 3619 if (track->pid > l->max_pid) 3620 l->max_pid = track->pid; 3621 3622 cpumask_set_cpu(track->cpu, 3623 to_cpumask(l->cpus)); 3624 } 3625 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3626 return 1; 3627 } 3628 3629 if (track->addr < caddr) 3630 end = pos; 3631 else 3632 start = pos; 3633 } 3634 3635 /* 3636 * Not found. 
Insert new tracking element. 3637 */ 3638 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3639 return 0; 3640 3641 l = t->loc + pos; 3642 if (pos < t->count) 3643 memmove(l + 1, l, 3644 (t->count - pos) * sizeof(struct location)); 3645 t->count++; 3646 l->count = 1; 3647 l->addr = track->addr; 3648 l->sum_time = age; 3649 l->min_time = age; 3650 l->max_time = age; 3651 l->min_pid = track->pid; 3652 l->max_pid = track->pid; 3653 cpumask_clear(to_cpumask(l->cpus)); 3654 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 3655 nodes_clear(l->nodes); 3656 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3657 return 1; 3658} 3659 3660static void process_slab(struct loc_track *t, struct kmem_cache *s, 3661 struct page *page, enum track_item alloc, 3662 unsigned long *map) 3663{ 3664 void *addr = page_address(page); 3665 void *p; 3666 3667 bitmap_zero(map, page->objects); 3668 for_each_free_object(p, s, page->freelist) 3669 set_bit(slab_index(p, s, addr), map); 3670 3671 for_each_object(p, s, addr, page->objects) 3672 if (!test_bit(slab_index(p, s, addr), map)) 3673 add_location(t, s, get_track(s, p, alloc)); 3674} 3675 3676static int list_locations(struct kmem_cache *s, char *buf, 3677 enum track_item alloc) 3678{ 3679 int len = 0; 3680 unsigned long i; 3681 struct loc_track t = { 0, 0, NULL }; 3682 int node; 3683 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3684 sizeof(unsigned long), GFP_KERNEL); 3685 3686 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3687 GFP_TEMPORARY)) { 3688 kfree(map); 3689 return sprintf(buf, "Out of memory\n"); 3690 } 3691 /* Push back cpu slabs */ 3692 flush_all(s); 3693 3694 for_each_node_state(node, N_NORMAL_MEMORY) { 3695 struct kmem_cache_node *n = get_node(s, node); 3696 unsigned long flags; 3697 struct page *page; 3698 3699 if (!atomic_long_read(&n->nr_slabs)) 3700 continue; 3701 3702 spin_lock_irqsave(&n->list_lock, flags); 3703 list_for_each_entry(page, &n->partial, lru) 3704 process_slab(&t, s, page, alloc, map); 3705 list_for_each_entry(page, &n->full, lru) 3706 process_slab(&t, s, page, alloc, map); 3707 spin_unlock_irqrestore(&n->list_lock, flags); 3708 } 3709 3710 for (i = 0; i < t.count; i++) { 3711 struct location *l = &t.loc[i]; 3712 3713 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 3714 break; 3715 len += sprintf(buf + len, "%7ld ", l->count); 3716 3717 if (l->addr) 3718 len += sprint_symbol(buf + len, (unsigned long)l->addr); 3719 else 3720 len += sprintf(buf + len, "<not-available>"); 3721 3722 if (l->sum_time != l->min_time) { 3723 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3724 l->min_time, 3725 (long)div_u64(l->sum_time, l->count), 3726 l->max_time); 3727 } else 3728 len += sprintf(buf + len, " age=%ld", 3729 l->min_time); 3730 3731 if (l->min_pid != l->max_pid) 3732 len += sprintf(buf + len, " pid=%ld-%ld", 3733 l->min_pid, l->max_pid); 3734 else 3735 len += sprintf(buf + len, " pid=%ld", 3736 l->min_pid); 3737 3738 if (num_online_cpus() > 1 && 3739 !cpumask_empty(to_cpumask(l->cpus)) && 3740 len < PAGE_SIZE - 60) { 3741 len += sprintf(buf + len, " cpus="); 3742 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3743 to_cpumask(l->cpus)); 3744 } 3745 3746 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && 3747 len < PAGE_SIZE - 60) { 3748 len += sprintf(buf + len, " nodes="); 3749 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3750 l->nodes); 3751 } 3752 3753 len += sprintf(buf + len, "\n"); 3754 } 3755 3756 free_loc_track(&t); 3757 kfree(map); 3758 if 
(!t.count) 3759 len += sprintf(buf, "No data\n"); 3760 return len; 3761} 3762 3763enum slab_stat_type { 3764 SL_ALL, /* All slabs */ 3765 SL_PARTIAL, /* Only partially allocated slabs */ 3766 SL_CPU, /* Only slabs used for cpu caches */ 3767 SL_OBJECTS, /* Determine allocated objects not slabs */ 3768 SL_TOTAL /* Determine object capacity not slabs */ 3769}; 3770 3771#define SO_ALL (1 << SL_ALL) 3772#define SO_PARTIAL (1 << SL_PARTIAL) 3773#define SO_CPU (1 << SL_CPU) 3774#define SO_OBJECTS (1 << SL_OBJECTS) 3775#define SO_TOTAL (1 << SL_TOTAL) 3776 3777static ssize_t show_slab_objects(struct kmem_cache *s, 3778 char *buf, unsigned long flags) 3779{ 3780 unsigned long total = 0; 3781 int node; 3782 int x; 3783 unsigned long *nodes; 3784 unsigned long *per_cpu; 3785 3786 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3787 if (!nodes) 3788 return -ENOMEM; 3789 per_cpu = nodes + nr_node_ids; 3790 3791 if (flags & SO_CPU) { 3792 int cpu; 3793 3794 for_each_possible_cpu(cpu) { 3795 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3796 3797 if (!c || c->node < 0) 3798 continue; 3799 3800 if (c->page) { 3801 if (flags & SO_TOTAL) 3802 x = c->page->objects; 3803 else if (flags & SO_OBJECTS) 3804 x = c->page->inuse; 3805 else 3806 x = 1; 3807 3808 total += x; 3809 nodes[c->node] += x; 3810 } 3811 per_cpu[c->node]++; 3812 } 3813 } 3814 3815 if (flags & SO_ALL) { 3816 for_each_node_state(node, N_NORMAL_MEMORY) { 3817 struct kmem_cache_node *n = get_node(s, node); 3818 3819 if (flags & SO_TOTAL) 3820 x = atomic_long_read(&n->total_objects); 3821 else if (flags & SO_OBJECTS) 3822 x = atomic_long_read(&n->total_objects) - 3823 count_partial(n, count_free); 3824 3825 else 3826 x = atomic_long_read(&n->nr_slabs); 3827 total += x; 3828 nodes[node] += x; 3829 } 3830 3831 } else if (flags & SO_PARTIAL) { 3832 for_each_node_state(node, N_NORMAL_MEMORY) { 3833 struct kmem_cache_node *n = get_node(s, node); 3834 3835 if (flags & SO_TOTAL) 3836 x = count_partial(n, count_total); 3837 else if (flags & SO_OBJECTS) 3838 x = count_partial(n, count_inuse); 3839 else 3840 x = n->nr_partial; 3841 total += x; 3842 nodes[node] += x; 3843 } 3844 } 3845 x = sprintf(buf, "%lu", total); 3846#ifdef CONFIG_NUMA 3847 for_each_node_state(node, N_NORMAL_MEMORY) 3848 if (nodes[node]) 3849 x += sprintf(buf + x, " N%d=%lu", 3850 node, nodes[node]); 3851#endif 3852 kfree(nodes); 3853 return x + sprintf(buf + x, "\n"); 3854} 3855 3856static int any_slab_objects(struct kmem_cache *s) 3857{ 3858 int node; 3859 3860 for_each_online_node(node) { 3861 struct kmem_cache_node *n = get_node(s, node); 3862 3863 if (!n) 3864 continue; 3865 3866 if (atomic_long_read(&n->total_objects)) 3867 return 1; 3868 } 3869 return 0; 3870} 3871 3872#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3873#define to_slab(n) container_of(n, struct kmem_cache, kobj); 3874 3875struct slab_attribute { 3876 struct attribute attr; 3877 ssize_t (*show)(struct kmem_cache *s, char *buf); 3878 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3879}; 3880 3881#define SLAB_ATTR_RO(_name) \ 3882 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3883 3884#define SLAB_ATTR(_name) \ 3885 static struct slab_attribute _name##_attr = \ 3886 __ATTR(_name, 0644, _name##_show, _name##_store) 3887 3888static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3889{ 3890 return sprintf(buf, "%d\n", s->size); 3891} 3892SLAB_ATTR_RO(slab_size); 3893 3894static ssize_t align_show(struct kmem_cache *s, 
char *buf) 3895{ 3896 return sprintf(buf, "%d\n", s->align); 3897} 3898SLAB_ATTR_RO(align); 3899 3900static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3901{ 3902 return sprintf(buf, "%d\n", s->objsize); 3903} 3904SLAB_ATTR_RO(object_size); 3905 3906static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3907{ 3908 return sprintf(buf, "%d\n", oo_objects(s->oo)); 3909} 3910SLAB_ATTR_RO(objs_per_slab); 3911 3912static ssize_t order_store(struct kmem_cache *s, 3913 const char *buf, size_t length) 3914{ 3915 unsigned long order; 3916 int err; 3917 3918 err = strict_strtoul(buf, 10, &order); 3919 if (err) 3920 return err; 3921 3922 if (order > slub_max_order || order < slub_min_order) 3923 return -EINVAL; 3924 3925 calculate_sizes(s, order); 3926 return length; 3927} 3928 3929static ssize_t order_show(struct kmem_cache *s, char *buf) 3930{ 3931 return sprintf(buf, "%d\n", oo_order(s->oo)); 3932} 3933SLAB_ATTR(order); 3934 3935static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 3936{ 3937 return sprintf(buf, "%lu\n", s->min_partial); 3938} 3939 3940static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 3941 size_t length) 3942{ 3943 unsigned long min; 3944 int err; 3945 3946 err = strict_strtoul(buf, 10, &min); 3947 if (err) 3948 return err; 3949 3950 set_min_partial(s, min); 3951 return length; 3952} 3953SLAB_ATTR(min_partial); 3954 3955static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3956{ 3957 if (s->ctor) { 3958 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3959 3960 return n + sprintf(buf + n, "\n"); 3961 } 3962 return 0; 3963} 3964SLAB_ATTR_RO(ctor); 3965 3966static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3967{ 3968 return sprintf(buf, "%d\n", s->refcount - 1); 3969} 3970SLAB_ATTR_RO(aliases); 3971 3972static ssize_t slabs_show(struct kmem_cache *s, char *buf) 3973{ 3974 return show_slab_objects(s, buf, SO_ALL); 3975} 3976SLAB_ATTR_RO(slabs); 3977 3978static ssize_t partial_show(struct kmem_cache *s, char *buf) 3979{ 3980 return show_slab_objects(s, buf, SO_PARTIAL); 3981} 3982SLAB_ATTR_RO(partial); 3983 3984static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3985{ 3986 return show_slab_objects(s, buf, SO_CPU); 3987} 3988SLAB_ATTR_RO(cpu_slabs); 3989 3990static ssize_t objects_show(struct kmem_cache *s, char *buf) 3991{ 3992 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 3993} 3994SLAB_ATTR_RO(objects); 3995 3996static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 3997{ 3998 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 3999} 4000SLAB_ATTR_RO(objects_partial); 4001 4002static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 4003{ 4004 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 4005} 4006SLAB_ATTR_RO(total_objects); 4007 4008static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 4009{ 4010 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 4011} 4012 4013static ssize_t sanity_checks_store(struct kmem_cache *s, 4014 const char *buf, size_t length) 4015{ 4016 s->flags &= ~SLAB_DEBUG_FREE; 4017 if (buf[0] == '1') 4018 s->flags |= SLAB_DEBUG_FREE; 4019 return length; 4020} 4021SLAB_ATTR(sanity_checks); 4022 4023static ssize_t trace_show(struct kmem_cache *s, char *buf) 4024{ 4025 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 4026} 4027 4028static ssize_t trace_store(struct kmem_cache *s, const char *buf, 4029 size_t length) 4030{ 4031 s->flags &= ~SLAB_TRACE; 4032 if (buf[0] == '1') 4033 s->flags |= SLAB_TRACE; 4034 
return length; 4035} 4036SLAB_ATTR(trace); 4037 4038#ifdef CONFIG_FAILSLAB 4039static ssize_t failslab_show(struct kmem_cache *s, char *buf) 4040{ 4041 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 4042} 4043 4044static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 4045 size_t length) 4046{ 4047 s->flags &= ~SLAB_FAILSLAB; 4048 if (buf[0] == '1') 4049 s->flags |= SLAB_FAILSLAB; 4050 return length; 4051} 4052SLAB_ATTR(failslab); 4053#endif 4054 4055static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 4056{ 4057 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 4058} 4059 4060static ssize_t reclaim_account_store(struct kmem_cache *s, 4061 const char *buf, size_t length) 4062{ 4063 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 4064 if (buf[0] == '1') 4065 s->flags |= SLAB_RECLAIM_ACCOUNT; 4066 return length; 4067} 4068SLAB_ATTR(reclaim_account); 4069 4070static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 4071{ 4072 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 4073} 4074SLAB_ATTR_RO(hwcache_align); 4075 4076#ifdef CONFIG_ZONE_DMA 4077static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 4078{ 4079 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 4080} 4081SLAB_ATTR_RO(cache_dma); 4082#endif 4083 4084static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 4085{ 4086 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 4087} 4088SLAB_ATTR_RO(destroy_by_rcu); 4089 4090static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 4091{ 4092 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 4093} 4094 4095static ssize_t red_zone_store(struct kmem_cache *s, 4096 const char *buf, size_t length) 4097{ 4098 if (any_slab_objects(s)) 4099 return -EBUSY; 4100 4101 s->flags &= ~SLAB_RED_ZONE; 4102 if (buf[0] == '1') 4103 s->flags |= SLAB_RED_ZONE; 4104 calculate_sizes(s, -1); 4105 return length; 4106} 4107SLAB_ATTR(red_zone); 4108 4109static ssize_t poison_show(struct kmem_cache *s, char *buf) 4110{ 4111 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 4112} 4113 4114static ssize_t poison_store(struct kmem_cache *s, 4115 const char *buf, size_t length) 4116{ 4117 if (any_slab_objects(s)) 4118 return -EBUSY; 4119 4120 s->flags &= ~SLAB_POISON; 4121 if (buf[0] == '1') 4122 s->flags |= SLAB_POISON; 4123 calculate_sizes(s, -1); 4124 return length; 4125} 4126SLAB_ATTR(poison); 4127 4128static ssize_t store_user_show(struct kmem_cache *s, char *buf) 4129{ 4130 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 4131} 4132 4133static ssize_t store_user_store(struct kmem_cache *s, 4134 const char *buf, size_t length) 4135{ 4136 if (any_slab_objects(s)) 4137 return -EBUSY; 4138 4139 s->flags &= ~SLAB_STORE_USER; 4140 if (buf[0] == '1') 4141 s->flags |= SLAB_STORE_USER; 4142 calculate_sizes(s, -1); 4143 return length; 4144} 4145SLAB_ATTR(store_user); 4146 4147static ssize_t validate_show(struct kmem_cache *s, char *buf) 4148{ 4149 return 0; 4150} 4151 4152static ssize_t validate_store(struct kmem_cache *s, 4153 const char *buf, size_t length) 4154{ 4155 int ret = -EINVAL; 4156 4157 if (buf[0] == '1') { 4158 ret = validate_slab_cache(s); 4159 if (ret >= 0) 4160 ret = length; 4161 } 4162 return ret; 4163} 4164SLAB_ATTR(validate); 4165 4166static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4167{ 4168 return 0; 4169} 4170 4171static ssize_t shrink_store(struct kmem_cache *s, 4172 const char *buf, size_t length) 4173{ 4174 if (buf[0] == '1') { 4175 int rc = 
kmem_cache_shrink(s); 4176 4177 if (rc) 4178 return rc; 4179 } else 4180 return -EINVAL; 4181 return length; 4182} 4183SLAB_ATTR(shrink); 4184 4185static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4186{ 4187 if (!(s->flags & SLAB_STORE_USER)) 4188 return -ENOSYS; 4189 return list_locations(s, buf, TRACK_ALLOC); 4190} 4191SLAB_ATTR_RO(alloc_calls); 4192 4193static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4194{ 4195 if (!(s->flags & SLAB_STORE_USER)) 4196 return -ENOSYS; 4197 return list_locations(s, buf, TRACK_FREE); 4198} 4199SLAB_ATTR_RO(free_calls); 4200 4201#ifdef CONFIG_NUMA 4202static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4203{ 4204 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4205} 4206 4207static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4208 const char *buf, size_t length) 4209{ 4210 unsigned long ratio; 4211 int err; 4212 4213 err = strict_strtoul(buf, 10, &ratio); 4214 if (err) 4215 return err; 4216 4217 if (ratio <= 100) 4218 s->remote_node_defrag_ratio = ratio * 10; 4219 4220 return length; 4221} 4222SLAB_ATTR(remote_node_defrag_ratio); 4223#endif 4224 4225#ifdef CONFIG_SLUB_STATS 4226static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4227{ 4228 unsigned long sum = 0; 4229 int cpu; 4230 int len; 4231 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4232 4233 if (!data) 4234 return -ENOMEM; 4235 4236 for_each_online_cpu(cpu) { 4237 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 4238 4239 data[cpu] = x; 4240 sum += x; 4241 } 4242 4243 len = sprintf(buf, "%lu", sum); 4244 4245#ifdef CONFIG_SMP 4246 for_each_online_cpu(cpu) { 4247 if (data[cpu] && len < PAGE_SIZE - 20) 4248 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4249 } 4250#endif 4251 kfree(data); 4252 return len + sprintf(buf + len, "\n"); 4253} 4254 4255static void clear_stat(struct kmem_cache *s, enum stat_item si) 4256{ 4257 int cpu; 4258 4259 for_each_online_cpu(cpu) 4260 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 4261} 4262 4263#define STAT_ATTR(si, text) \ 4264static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4265{ \ 4266 return show_stat(s, buf, si); \ 4267} \ 4268static ssize_t text##_store(struct kmem_cache *s, \ 4269 const char *buf, size_t length) \ 4270{ \ 4271 if (buf[0] != '0') \ 4272 return -EINVAL; \ 4273 clear_stat(s, si); \ 4274 return length; \ 4275} \ 4276SLAB_ATTR(text); \ 4277 4278STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4279STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4280STAT_ATTR(FREE_FASTPATH, free_fastpath); 4281STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4282STAT_ATTR(FREE_FROZEN, free_frozen); 4283STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4284STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4285STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4286STAT_ATTR(ALLOC_SLAB, alloc_slab); 4287STAT_ATTR(ALLOC_REFILL, alloc_refill); 4288STAT_ATTR(FREE_SLAB, free_slab); 4289STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 4290STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 4291STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 4292STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4293STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4294STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4295STAT_ATTR(ORDER_FALLBACK, order_fallback); 4296#endif 4297 4298static struct attribute *slab_attrs[] = { 4299 &slab_size_attr.attr, 4300 &object_size_attr.attr, 4301 &objs_per_slab_attr.attr, 4302 &order_attr.attr, 4303 &min_partial_attr.attr, 4304 
&objects_attr.attr, 4305 &objects_partial_attr.attr, 4306 &total_objects_attr.attr, 4307 &slabs_attr.attr, 4308 &partial_attr.attr, 4309 &cpu_slabs_attr.attr, 4310 &ctor_attr.attr, 4311 &aliases_attr.attr, 4312 &align_attr.attr, 4313 &sanity_checks_attr.attr, 4314 &trace_attr.attr, 4315 &hwcache_align_attr.attr, 4316 &reclaim_account_attr.attr, 4317 &destroy_by_rcu_attr.attr, 4318 &red_zone_attr.attr, 4319 &poison_attr.attr, 4320 &store_user_attr.attr, 4321 &validate_attr.attr, 4322 &shrink_attr.attr, 4323 &alloc_calls_attr.attr, 4324 &free_calls_attr.attr, 4325#ifdef CONFIG_ZONE_DMA 4326 &cache_dma_attr.attr, 4327#endif 4328#ifdef CONFIG_NUMA 4329 &remote_node_defrag_ratio_attr.attr, 4330#endif 4331#ifdef CONFIG_SLUB_STATS 4332 &alloc_fastpath_attr.attr, 4333 &alloc_slowpath_attr.attr, 4334 &free_fastpath_attr.attr, 4335 &free_slowpath_attr.attr, 4336 &free_frozen_attr.attr, 4337 &free_add_partial_attr.attr, 4338 &free_remove_partial_attr.attr, 4339 &alloc_from_partial_attr.attr, 4340 &alloc_slab_attr.attr, 4341 &alloc_refill_attr.attr, 4342 &free_slab_attr.attr, 4343 &cpuslab_flush_attr.attr, 4344 &deactivate_full_attr.attr, 4345 &deactivate_empty_attr.attr, 4346 &deactivate_to_head_attr.attr, 4347 &deactivate_to_tail_attr.attr, 4348 &deactivate_remote_frees_attr.attr, 4349 &order_fallback_attr.attr, 4350#endif 4351#ifdef CONFIG_FAILSLAB 4352 &failslab_attr.attr, 4353#endif 4354 4355 NULL 4356}; 4357 4358static struct attribute_group slab_attr_group = { 4359 .attrs = slab_attrs, 4360}; 4361 4362static ssize_t slab_attr_show(struct kobject *kobj, 4363 struct attribute *attr, 4364 char *buf) 4365{ 4366 struct slab_attribute *attribute; 4367 struct kmem_cache *s; 4368 int err; 4369 4370 attribute = to_slab_attr(attr); 4371 s = to_slab(kobj); 4372 4373 if (!attribute->show) 4374 return -EIO; 4375 4376 err = attribute->show(s, buf); 4377 4378 return err; 4379} 4380 4381static ssize_t slab_attr_store(struct kobject *kobj, 4382 struct attribute *attr, 4383 const char *buf, size_t len) 4384{ 4385 struct slab_attribute *attribute; 4386 struct kmem_cache *s; 4387 int err; 4388 4389 attribute = to_slab_attr(attr); 4390 s = to_slab(kobj); 4391 4392 if (!attribute->store) 4393 return -EIO; 4394 4395 err = attribute->store(s, buf, len); 4396 4397 return err; 4398} 4399 4400static void kmem_cache_release(struct kobject *kobj) 4401{ 4402 struct kmem_cache *s = to_slab(kobj); 4403 4404 kfree(s->name); 4405 kfree(s); 4406} 4407 4408static const struct sysfs_ops slab_sysfs_ops = { 4409 .show = slab_attr_show, 4410 .store = slab_attr_store, 4411}; 4412 4413static struct kobj_type slab_ktype = { 4414 .sysfs_ops = &slab_sysfs_ops, 4415 .release = kmem_cache_release 4416}; 4417 4418static int uevent_filter(struct kset *kset, struct kobject *kobj) 4419{ 4420 struct kobj_type *ktype = get_ktype(kobj); 4421 4422 if (ktype == &slab_ktype) 4423 return 1; 4424 return 0; 4425} 4426 4427static const struct kset_uevent_ops slab_uevent_ops = { 4428 .filter = uevent_filter, 4429}; 4430 4431static struct kset *slab_kset; 4432 4433#define ID_STR_LENGTH 64 4434 4435/* Create a unique string id for a slab cache: 4436 * 4437 * Format :[flags-]size 4438 */ 4439static char *create_unique_id(struct kmem_cache *s) 4440{ 4441 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 4442 char *p = name; 4443 4444 BUG_ON(!name); 4445 4446 *p++ = ':'; 4447 /* 4448 * First flags affecting slabcache operations. We will only 4449 * get here for aliasable slabs so we do not need to support 4450 * too many flags. 
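 * For example (illustrative, derived from the flag checks that follow):
 * a cache with none of the flags below except kmemcheck tracking
 * (SLAB_NOTRACK clear) and a size of 192 bytes would get the id
 * ":t-0000192".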
The flags here must cover all flags that 4451 * are matched during merging to guarantee that the id is 4452 * unique. 4453 */ 4454 if (s->flags & SLAB_CACHE_DMA) 4455 *p++ = 'd'; 4456 if (s->flags & SLAB_RECLAIM_ACCOUNT) 4457 *p++ = 'a'; 4458 if (s->flags & SLAB_DEBUG_FREE) 4459 *p++ = 'F'; 4460 if (!(s->flags & SLAB_NOTRACK)) 4461 *p++ = 't'; 4462 if (p != name + 1) 4463 *p++ = '-'; 4464 p += sprintf(p, "%07d", s->size); 4465 BUG_ON(p > name + ID_STR_LENGTH - 1); 4466 return name; 4467} 4468 4469static int sysfs_slab_add(struct kmem_cache *s) 4470{ 4471 int err; 4472 const char *name; 4473 int unmergeable; 4474 4475 if (slab_state < SYSFS) 4476 /* Defer until later */ 4477 return 0; 4478 4479 unmergeable = slab_unmergeable(s); 4480 if (unmergeable) { 4481 /* 4482 * Slabcache can never be merged so we can use the name proper. 4483 * This is typically the case for debug situations. In that 4484 * case we can catch duplicate names easily. 4485 */ 4486 sysfs_remove_link(&slab_kset->kobj, s->name); 4487 name = s->name; 4488 } else { 4489 /* 4490 * Create a unique name for the slab as a target 4491 * for the symlinks. 4492 */ 4493 name = create_unique_id(s); 4494 } 4495 4496 s->kobj.kset = slab_kset; 4497 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name); 4498 if (err) { 4499 kobject_put(&s->kobj); 4500 return err; 4501 } 4502 4503 err = sysfs_create_group(&s->kobj, &slab_attr_group); 4504 if (err) { 4505 kobject_del(&s->kobj); 4506 kobject_put(&s->kobj); 4507 return err; 4508 } 4509 kobject_uevent(&s->kobj, KOBJ_ADD); 4510 if (!unmergeable) { 4511 /* Setup first alias */ 4512 sysfs_slab_alias(s, s->name); 4513 kfree(name); 4514 } 4515 return 0; 4516} 4517 4518static void sysfs_slab_remove(struct kmem_cache *s) 4519{ 4520 if (slab_state < SYSFS) 4521 /* 4522 * Sysfs has not been setup yet so no need to remove the 4523 * cache from sysfs. 4524 */ 4525 return; 4526 4527 kobject_uevent(&s->kobj, KOBJ_REMOVE); 4528 kobject_del(&s->kobj); 4529 kobject_put(&s->kobj); 4530} 4531 4532/* 4533 * Need to buffer aliases during bootup until sysfs becomes 4534 * available lest we lose that information. 4535 */ 4536struct saved_alias { 4537 struct kmem_cache *s; 4538 const char *name; 4539 struct saved_alias *next; 4540}; 4541 4542static struct saved_alias *alias_list; 4543 4544static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 4545{ 4546 struct saved_alias *al; 4547 4548 if (slab_state == SYSFS) { 4549 /* 4550 * If we have a leftover link then remove it. 
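 * (A stale link of the same name can be left behind, for instance by an
 * earlier alias whose cache has since gone away; symlinks for merged
 * caches are not torn down when their creator disappears.)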
4551 */ 4552 sysfs_remove_link(&slab_kset->kobj, name); 4553 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 4554 } 4555 4556 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 4557 if (!al) 4558 return -ENOMEM; 4559 4560 al->s = s; 4561 al->name = name; 4562 al->next = alias_list; 4563 alias_list = al; 4564 return 0; 4565} 4566 4567static int __init slab_sysfs_init(void) 4568{ 4569 struct kmem_cache *s; 4570 int err; 4571 4572 down_write(&slub_lock); 4573 4574 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj); 4575 if (!slab_kset) { 4576 up_write(&slub_lock); 4577 printk(KERN_ERR "Cannot register slab subsystem.\n"); 4578 return -ENOSYS; 4579 } 4580 4581 slab_state = SYSFS; 4582 4583 list_for_each_entry(s, &slab_caches, list) { 4584 err = sysfs_slab_add(s); 4585 if (err) 4586 printk(KERN_ERR "SLUB: Unable to add boot slab %s" 4587 " to sysfs\n", s->name); 4588 } 4589 4590 while (alias_list) { 4591 struct saved_alias *al = alias_list; 4592 4593 alias_list = alias_list->next; 4594 err = sysfs_slab_alias(al->s, al->name); 4595 if (err) 4596 printk(KERN_ERR "SLUB: Unable to add boot slab alias" 4597 " %s to sysfs\n", al->name); 4598 kfree(al); 4599 } 4600 4601 up_write(&slub_lock); 4602 resiliency_test(); 4603 return 0; 4604} 4605 4606__initcall(slab_sysfs_init); 4607#endif 4608 4609/* 4610 * The /proc/slabinfo ABI 4611 */ 4612#ifdef CONFIG_SLABINFO 4613static void print_slabinfo_header(struct seq_file *m) 4614{ 4615 seq_puts(m, "slabinfo - version: 2.1\n"); 4616 seq_puts(m, "# name <active_objs> <num_objs> <objsize> " 4617 "<objperslab> <pagesperslab>"); 4618 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 4619 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 4620 seq_putc(m, '\n'); 4621} 4622 4623static void *s_start(struct seq_file *m, loff_t *pos) 4624{ 4625 loff_t n = *pos; 4626 4627 down_read(&slub_lock); 4628 if (!n) 4629 print_slabinfo_header(m); 4630 4631 return seq_list_start(&slab_caches, *pos); 4632} 4633 4634static void *s_next(struct seq_file *m, void *p, loff_t *pos) 4635{ 4636 return seq_list_next(p, &slab_caches, pos); 4637} 4638 4639static void s_stop(struct seq_file *m, void *p) 4640{ 4641 up_read(&slub_lock); 4642} 4643 4644static int s_show(struct seq_file *m, void *p) 4645{ 4646 unsigned long nr_partials = 0; 4647 unsigned long nr_slabs = 0; 4648 unsigned long nr_inuse = 0; 4649 unsigned long nr_objs = 0; 4650 unsigned long nr_free = 0; 4651 struct kmem_cache *s; 4652 int node; 4653 4654 s = list_entry(p, struct kmem_cache, list); 4655 4656 for_each_online_node(node) { 4657 struct kmem_cache_node *n = get_node(s, node); 4658 4659 if (!n) 4660 continue; 4661 4662 nr_partials += n->nr_partial; 4663 nr_slabs += atomic_long_read(&n->nr_slabs); 4664 nr_objs += atomic_long_read(&n->total_objects); 4665 nr_free += count_partial(n, count_free); 4666 } 4667 4668 nr_inuse = nr_objs - nr_free; 4669 4670 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse, 4671 nr_objs, s->size, oo_objects(s->oo), 4672 (1 << oo_order(s->oo))); 4673 seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0); 4674 seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs, 4675 0UL); 4676 seq_putc(m, '\n'); 4677 return 0; 4678} 4679 4680static const struct seq_operations slabinfo_op = { 4681 .start = s_start, 4682 .next = s_next, 4683 .stop = s_stop, 4684 .show = s_show, 4685}; 4686 4687static int slabinfo_open(struct inode *inode, struct file *file) 4688{ 4689 return seq_open(file, &slabinfo_op); 4690} 4691 4692static const
struct file_operations proc_slabinfo_operations = { 4693 .open = slabinfo_open, 4694 .read = seq_read, 4695 .llseek = seq_lseek, 4696 .release = seq_release, 4697}; 4698 4699static int __init slab_proc_init(void) 4700{ 4701 proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations); 4702 return 0; 4703} 4704module_init(slab_proc_init); 4705#endif /* CONFIG_SLABINFO */ 4706
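/*
 * Illustration of the /proc/slabinfo format produced by
 * print_slabinfo_header() and s_show() above. The cache name and all
 * numbers are hypothetical, chosen only to be mutually consistent
 * (64-byte objects, 64 objects per order-0 slab, 20 slabs, 1200 objects
 * in use); spacing is approximate:
 *
 * kmalloc-64          1200   1280     64   64    1 : tunables    0    0    0 : slabdata     20     20      0
 */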