slub.c revision 773ff60e841461cb1f9374a713ffcda029b8c317
1/* 2 * SLUB: A slab allocator that limits cache line use instead of queuing 3 * objects in per cpu and per node lists. 4 * 5 * The allocator synchronizes using per slab locks and only 6 * uses a centralized lock to manage a pool of partial slabs. 7 * 8 * (C) 2007 SGI, Christoph Lameter 9 */ 10 11#include <linux/mm.h> 12#include <linux/module.h> 13#include <linux/bit_spinlock.h> 14#include <linux/interrupt.h> 15#include <linux/bitops.h> 16#include <linux/slab.h> 17#include <linux/proc_fs.h> 18#include <linux/seq_file.h> 19#include <linux/cpu.h> 20#include <linux/cpuset.h> 21#include <linux/mempolicy.h> 22#include <linux/ctype.h> 23#include <linux/debugobjects.h> 24#include <linux/kallsyms.h> 25#include <linux/memory.h> 26#include <linux/math64.h> 27#include <linux/fault-inject.h> 28 29/* 30 * Lock order: 31 * 1. slab_lock(page) 32 * 2. slab->list_lock 33 * 34 * The slab_lock protects operations on the object of a particular 35 * slab and its metadata in the page struct. If the slab lock 36 * has been taken then no allocations nor frees can be performed 37 * on the objects in the slab nor can the slab be added or removed 38 * from the partial or full lists since this would mean modifying 39 * the page_struct of the slab. 40 * 41 * The list_lock protects the partial and full list on each node and 42 * the partial slab counter. If taken then no new slabs may be added or 43 * removed from the lists nor make the number of partial slabs be modified. 44 * (Note that the total number of slabs is an atomic value that may be 45 * modified without taking the list lock). 46 * 47 * The list_lock is a centralized lock and thus we avoid taking it as 48 * much as possible. As long as SLUB does not have to handle partial 49 * slabs, operations can continue without any centralized lock. F.e. 50 * allocating a long series of objects that fill up slabs does not require 51 * the list lock. 52 * 53 * The lock order is sometimes inverted when we are trying to get a slab 54 * off a list. We take the list_lock and then look for a page on the list 55 * to use. While we do that objects in the slabs may be freed. We can 56 * only operate on the slab if we have also taken the slab_lock. So we use 57 * a slab_trylock() on the slab. If trylock was successful then no frees 58 * can occur anymore and we can use the slab for allocations etc. If the 59 * slab_trylock() does not succeed then frees are in progress in the slab and 60 * we must stay away from it for a while since we may cause a bouncing 61 * cacheline if we try to acquire the lock. So go onto the next slab. 62 * If all pages are busy then we may allocate a new slab instead of reusing 63 * a partial slab. A new slab has noone operating on it and thus there is 64 * no danger of cacheline contention. 65 * 66 * Interrupts are disabled during allocation and deallocation in order to 67 * make the slab allocator safe to use in the context of an irq. In addition 68 * interrupts are disabled to ensure that the processor does not change 69 * while handling per_cpu slabs, due to kernel preemption. 70 * 71 * SLUB assigns one slab for allocation to each processor. 72 * Allocations only occur from these slabs called cpu slabs. 73 * 74 * Slabs with free elements are kept on a partial list and during regular 75 * operations no list for full slabs is used. If an object in a full slab is 76 * freed then the slab will show up again on the partial lists. 77 * We track full slabs for debugging purposes though because otherwise we 78 * cannot scan all objects. 
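/*
 * Editor's illustration, not part of slub.c: the inverted lock order described
 * above, as a standalone sketch. The list lock is taken first and every slab
 * on the list is probed with a trylock; a busy slab is skipped rather than
 * waited for, so the normal slab_lock -> list_lock order used elsewhere is
 * never violated and the busy slab's cacheline is not bounced. The demo_*
 * names and the pthread locks are assumptions of this sketch, not kernel
 * interfaces; in slub.c the same pattern appears in get_partial_node() and
 * lock_and_freeze_slab() further down.
 */
#include <pthread.h>
#include <stddef.h>

struct demo_slab {
        pthread_mutex_t lock;           /* stands in for slab_lock(page) */
        struct demo_slab *next;         /* linkage on the partial list */
};

struct demo_node {
        pthread_mutex_t list_lock;      /* stands in for n->list_lock */
        struct demo_slab *partial;      /* head of the partial list */
};

/* Return a locked slab removed from the partial list, or NULL. */
static struct demo_slab *demo_get_partial(struct demo_node *n)
{
        struct demo_slab *slab, **prev;

        pthread_mutex_lock(&n->list_lock);
        prev = &n->partial;
        for (slab = n->partial; slab; prev = &slab->next, slab = slab->next) {
                if (pthread_mutex_trylock(&slab->lock) == 0) {
                        *prev = slab->next;     /* unlink under list_lock */
                        break;
                }
                /* lock busy: frees are in progress, try the next slab */
        }
        pthread_mutex_unlock(&n->list_lock);
        return slab;
}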
79 * 80 * Slabs are freed when they become empty. Teardown and setup is 81 * minimal so we rely on the page allocators per cpu caches for 82 * fast frees and allocs. 83 * 84 * Overloading of page flags that are otherwise used for LRU management. 85 * 86 * PageActive The slab is frozen and exempt from list processing. 87 * This means that the slab is dedicated to a purpose 88 * such as satisfying allocations for a specific 89 * processor. Objects may be freed in the slab while 90 * it is frozen but slab_free will then skip the usual 91 * list operations. It is up to the processor holding 92 * the slab to integrate the slab into the slab lists 93 * when the slab is no longer needed. 94 * 95 * One use of this flag is to mark slabs that are 96 * used for allocations. Then such a slab becomes a cpu 97 * slab. The cpu slab may be equipped with an additional 98 * freelist that allows lockless access to 99 * free objects in addition to the regular freelist 100 * that requires the slab lock. 101 * 102 * PageError Slab requires special handling due to debug 103 * options set. This moves slab handling out of 104 * the fast path and disables lockless freelists. 105 */ 106 107#ifdef CONFIG_SLUB_DEBUG 108#define SLABDEBUG 1 109#else 110#define SLABDEBUG 0 111#endif 112 113/* 114 * Issues still to be resolved: 115 * 116 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 117 * 118 * - Variable sizing of the per node arrays 119 */ 120 121/* Enable to test recovery from slab corruption on boot */ 122#undef SLUB_RESILIENCY_TEST 123 124/* 125 * Mininum number of partial slabs. These will be left on the partial 126 * lists even if they are empty. kmem_cache_shrink may reclaim them. 127 */ 128#define MIN_PARTIAL 5 129 130/* 131 * Maximum number of desirable partial slabs. 132 * The existence of more partial slabs makes kmem_cache_shrink 133 * sort the partial list by the number of objects in the. 134 */ 135#define MAX_PARTIAL 10 136 137#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ 138 SLAB_POISON | SLAB_STORE_USER) 139 140/* 141 * Set of flags that will prevent slab merging 142 */ 143#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 144 SLAB_TRACE | SLAB_DESTROY_BY_RCU) 145 146#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 147 SLAB_CACHE_DMA) 148 149#ifndef ARCH_KMALLOC_MINALIGN 150#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 151#endif 152 153#ifndef ARCH_SLAB_MINALIGN 154#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 155#endif 156 157/* Internal SLUB flags */ 158#define __OBJECT_POISON 0x80000000 /* Poison object */ 159#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */ 160 161static int kmem_size = sizeof(struct kmem_cache); 162 163#ifdef CONFIG_SMP 164static struct notifier_block slab_notifier; 165#endif 166 167static enum { 168 DOWN, /* No slab functionality available */ 169 PARTIAL, /* kmem_cache_open() works but kmalloc does not */ 170 UP, /* Everything works but does not show up in sysfs */ 171 SYSFS /* Sysfs up */ 172} slab_state = DOWN; 173 174/* A list of all slab caches on the system */ 175static DECLARE_RWSEM(slub_lock); 176static LIST_HEAD(slab_caches); 177 178/* 179 * Tracking user of a slab. 
180 */ 181struct track { 182 void *addr; /* Called from address */ 183 int cpu; /* Was running on cpu */ 184 int pid; /* Pid context */ 185 unsigned long when; /* When did the operation occur */ 186}; 187 188enum track_item { TRACK_ALLOC, TRACK_FREE }; 189 190#ifdef CONFIG_SLUB_DEBUG 191static int sysfs_slab_add(struct kmem_cache *); 192static int sysfs_slab_alias(struct kmem_cache *, const char *); 193static void sysfs_slab_remove(struct kmem_cache *); 194 195#else 196static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 197static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 198 { return 0; } 199static inline void sysfs_slab_remove(struct kmem_cache *s) 200{ 201 kfree(s); 202} 203 204#endif 205 206static inline void stat(struct kmem_cache_cpu *c, enum stat_item si) 207{ 208#ifdef CONFIG_SLUB_STATS 209 c->stat[si]++; 210#endif 211} 212 213/******************************************************************** 214 * Core slab cache functions 215 *******************************************************************/ 216 217int slab_is_available(void) 218{ 219 return slab_state >= UP; 220} 221 222static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 223{ 224#ifdef CONFIG_NUMA 225 return s->node[node]; 226#else 227 return &s->local_node; 228#endif 229} 230 231static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu) 232{ 233#ifdef CONFIG_SMP 234 return s->cpu_slab[cpu]; 235#else 236 return &s->cpu_slab; 237#endif 238} 239 240/* Verify that a pointer has an address that is valid within a slab page */ 241static inline int check_valid_pointer(struct kmem_cache *s, 242 struct page *page, const void *object) 243{ 244 void *base; 245 246 if (!object) 247 return 1; 248 249 base = page_address(page); 250 if (object < base || object >= base + page->objects * s->size || 251 (object - base) % s->size) { 252 return 0; 253 } 254 255 return 1; 256} 257 258/* 259 * Slow version of get and set free pointer. 260 * 261 * This version requires touching the cache lines of kmem_cache which 262 * we avoid to do in the fast alloc free paths. There we obtain the offset 263 * from the page struct. 
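/*
 * Editor's illustration, not part of slub.c: a standalone sketch of the free
 * pointer handling implemented by get_freepointer()/set_freepointer() below.
 * Each free object stores the address of the next free object inside itself;
 * slub.c keeps it at byte offset s->offset (zero unless the first word of the
 * object must be preserved, see calculate_sizes()), and new_slab() later in
 * the file threads such a chain through every object of a fresh slab much
 * like demo_init_freelist() does here. The demo_* names, the object size and
 * the free pointer offset of 0 are assumptions of this sketch.
 */
#include <stddef.h>

#define DEMO_OBJ_SIZE   64              /* hypothetical object size */
#define DEMO_NR_OBJECTS 8               /* hypothetical objects per "slab" */

static void *demo_get_freepointer(void *object)
{
        return *(void **)object;        /* free pointer kept at offset 0 */
}

static void demo_set_freepointer(void *object, void *fp)
{
        *(void **)object = fp;
}

/* Chain all objects of a pointer-aligned buffer together, as new_slab() does. */
static void *demo_init_freelist(char *slab_base)
{
        char *p, *last = slab_base;

        for (p = slab_base + DEMO_OBJ_SIZE;
             p < slab_base + DEMO_NR_OBJECTS * DEMO_OBJ_SIZE;
             p += DEMO_OBJ_SIZE) {
                demo_set_freepointer(last, p);
                last = p;
        }
        demo_set_freepointer(last, NULL);       /* terminate the chain */
        return slab_base;                       /* head of the freelist */
}

/* Pop the first free object off the chain, as the allocation paths do. */
static void *demo_pop(void **freelist)
{
        void *object = *freelist;

        if (object)
                *freelist = demo_get_freepointer(object);
        return object;
}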
264 */ 265static inline void *get_freepointer(struct kmem_cache *s, void *object) 266{ 267 return *(void **)(object + s->offset); 268} 269 270static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 271{ 272 *(void **)(object + s->offset) = fp; 273} 274 275/* Loop over all objects in a slab */ 276#define for_each_object(__p, __s, __addr, __objects) \ 277 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 278 __p += (__s)->size) 279 280/* Scan freelist */ 281#define for_each_free_object(__p, __s, __free) \ 282 for (__p = (__free); __p; __p = get_freepointer((__s), __p)) 283 284/* Determine object index from a given position */ 285static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 286{ 287 return (p - addr) / s->size; 288} 289 290static inline struct kmem_cache_order_objects oo_make(int order, 291 unsigned long size) 292{ 293 struct kmem_cache_order_objects x = { 294 (order << 16) + (PAGE_SIZE << order) / size 295 }; 296 297 return x; 298} 299 300static inline int oo_order(struct kmem_cache_order_objects x) 301{ 302 return x.x >> 16; 303} 304 305static inline int oo_objects(struct kmem_cache_order_objects x) 306{ 307 return x.x & ((1 << 16) - 1); 308} 309 310#ifdef CONFIG_SLUB_DEBUG 311/* 312 * Debug settings: 313 */ 314#ifdef CONFIG_SLUB_DEBUG_ON 315static int slub_debug = DEBUG_DEFAULT_FLAGS; 316#else 317static int slub_debug; 318#endif 319 320static char *slub_debug_slabs; 321 322/* 323 * Object debugging 324 */ 325static void print_section(char *text, u8 *addr, unsigned int length) 326{ 327 int i, offset; 328 int newline = 1; 329 char ascii[17]; 330 331 ascii[16] = 0; 332 333 for (i = 0; i < length; i++) { 334 if (newline) { 335 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 336 newline = 0; 337 } 338 printk(KERN_CONT " %02x", addr[i]); 339 offset = i % 16; 340 ascii[offset] = isgraph(addr[i]) ? 
addr[i] : '.'; 341 if (offset == 15) { 342 printk(KERN_CONT " %s\n", ascii); 343 newline = 1; 344 } 345 } 346 if (!newline) { 347 i %= 16; 348 while (i < 16) { 349 printk(KERN_CONT " "); 350 ascii[i] = ' '; 351 i++; 352 } 353 printk(KERN_CONT " %s\n", ascii); 354 } 355} 356 357static struct track *get_track(struct kmem_cache *s, void *object, 358 enum track_item alloc) 359{ 360 struct track *p; 361 362 if (s->offset) 363 p = object + s->offset + sizeof(void *); 364 else 365 p = object + s->inuse; 366 367 return p + alloc; 368} 369 370static void set_track(struct kmem_cache *s, void *object, 371 enum track_item alloc, void *addr) 372{ 373 struct track *p; 374 375 if (s->offset) 376 p = object + s->offset + sizeof(void *); 377 else 378 p = object + s->inuse; 379 380 p += alloc; 381 if (addr) { 382 p->addr = addr; 383 p->cpu = smp_processor_id(); 384 p->pid = current->pid; 385 p->when = jiffies; 386 } else 387 memset(p, 0, sizeof(struct track)); 388} 389 390static void init_tracking(struct kmem_cache *s, void *object) 391{ 392 if (!(s->flags & SLAB_STORE_USER)) 393 return; 394 395 set_track(s, object, TRACK_FREE, NULL); 396 set_track(s, object, TRACK_ALLOC, NULL); 397} 398 399static void print_track(const char *s, struct track *t) 400{ 401 if (!t->addr) 402 return; 403 404 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 405 s, t->addr, jiffies - t->when, t->cpu, t->pid); 406} 407 408static void print_tracking(struct kmem_cache *s, void *object) 409{ 410 if (!(s->flags & SLAB_STORE_USER)) 411 return; 412 413 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 414 print_track("Freed", get_track(s, object, TRACK_FREE)); 415} 416 417static void print_page_info(struct page *page) 418{ 419 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 420 page, page->objects, page->inuse, page->freelist, page->flags); 421 422} 423 424static void slab_bug(struct kmem_cache *s, char *fmt, ...) 425{ 426 va_list args; 427 char buf[100]; 428 429 va_start(args, fmt); 430 vsnprintf(buf, sizeof(buf), fmt, args); 431 va_end(args); 432 printk(KERN_ERR "========================================" 433 "=====================================\n"); 434 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 435 printk(KERN_ERR "----------------------------------------" 436 "-------------------------------------\n\n"); 437} 438 439static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
440{ 441 va_list args; 442 char buf[100]; 443 444 va_start(args, fmt); 445 vsnprintf(buf, sizeof(buf), fmt, args); 446 va_end(args); 447 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 448} 449 450static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 451{ 452 unsigned int off; /* Offset of last byte */ 453 u8 *addr = page_address(page); 454 455 print_tracking(s, p); 456 457 print_page_info(page); 458 459 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 460 p, p - addr, get_freepointer(s, p)); 461 462 if (p > addr + 16) 463 print_section("Bytes b4", p - 16, 16); 464 465 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE)); 466 467 if (s->flags & SLAB_RED_ZONE) 468 print_section("Redzone", p + s->objsize, 469 s->inuse - s->objsize); 470 471 if (s->offset) 472 off = s->offset + sizeof(void *); 473 else 474 off = s->inuse; 475 476 if (s->flags & SLAB_STORE_USER) 477 off += 2 * sizeof(struct track); 478 479 if (off != s->size) 480 /* Beginning of the filler is the free pointer */ 481 print_section("Padding", p + off, s->size - off); 482 483 dump_stack(); 484} 485 486static void object_err(struct kmem_cache *s, struct page *page, 487 u8 *object, char *reason) 488{ 489 slab_bug(s, "%s", reason); 490 print_trailer(s, page, object); 491} 492 493static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 494{ 495 va_list args; 496 char buf[100]; 497 498 va_start(args, fmt); 499 vsnprintf(buf, sizeof(buf), fmt, args); 500 va_end(args); 501 slab_bug(s, "%s", buf); 502 print_page_info(page); 503 dump_stack(); 504} 505 506static void init_object(struct kmem_cache *s, void *object, int active) 507{ 508 u8 *p = object; 509 510 if (s->flags & __OBJECT_POISON) { 511 memset(p, POISON_FREE, s->objsize - 1); 512 p[s->objsize - 1] = POISON_END; 513 } 514 515 if (s->flags & SLAB_RED_ZONE) 516 memset(p + s->objsize, 517 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, 518 s->inuse - s->objsize); 519} 520 521static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 522{ 523 while (bytes) { 524 if (*start != (u8)value) 525 return start; 526 start++; 527 bytes--; 528 } 529 return NULL; 530} 531 532static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 533 void *from, void *to) 534{ 535 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 536 memset(from, data, to - from); 537} 538 539static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 540 u8 *object, char *what, 541 u8 *start, unsigned int value, unsigned int bytes) 542{ 543 u8 *fault; 544 u8 *end; 545 546 fault = check_bytes(start, value, bytes); 547 if (!fault) 548 return 1; 549 550 end = start + bytes; 551 while (end > fault && end[-1] == value) 552 end--; 553 554 slab_bug(s, "%s overwritten", what); 555 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", 556 fault, end - 1, fault[0], value); 557 print_trailer(s, page, object); 558 559 restore_bytes(s, what, value, fault, end); 560 return 0; 561} 562 563/* 564 * Object layout: 565 * 566 * object address 567 * Bytes of the object to be managed. 568 * If the freepointer may overlay the object then the free 569 * pointer is the first word of the object. 570 * 571 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 572 * 0xa5 (POISON_END) 573 * 574 * object + s->objsize 575 * Padding to reach word boundary. This is also used for Redzoning. 576 * Padding is extended by another word if Redzoning is enabled and 577 * objsize == inuse. 
578 * 579 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 580 * 0xcc (RED_ACTIVE) for objects in use. 581 * 582 * object + s->inuse 583 * Meta data starts here. 584 * 585 * A. Free pointer (if we cannot overwrite object on free) 586 * B. Tracking data for SLAB_STORE_USER 587 * C. Padding to reach required alignment boundary or at mininum 588 * one word if debugging is on to be able to detect writes 589 * before the word boundary. 590 * 591 * Padding is done using 0x5a (POISON_INUSE) 592 * 593 * object + s->size 594 * Nothing is used beyond s->size. 595 * 596 * If slabcaches are merged then the objsize and inuse boundaries are mostly 597 * ignored. And therefore no slab options that rely on these boundaries 598 * may be used with merged slabcaches. 599 */ 600 601static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 602{ 603 unsigned long off = s->inuse; /* The end of info */ 604 605 if (s->offset) 606 /* Freepointer is placed after the object. */ 607 off += sizeof(void *); 608 609 if (s->flags & SLAB_STORE_USER) 610 /* We also have user information there */ 611 off += 2 * sizeof(struct track); 612 613 if (s->size == off) 614 return 1; 615 616 return check_bytes_and_report(s, page, p, "Object padding", 617 p + off, POISON_INUSE, s->size - off); 618} 619 620/* Check the pad bytes at the end of a slab page */ 621static int slab_pad_check(struct kmem_cache *s, struct page *page) 622{ 623 u8 *start; 624 u8 *fault; 625 u8 *end; 626 int length; 627 int remainder; 628 629 if (!(s->flags & SLAB_POISON)) 630 return 1; 631 632 start = page_address(page); 633 length = (PAGE_SIZE << compound_order(page)); 634 end = start + length; 635 remainder = length % s->size; 636 if (!remainder) 637 return 1; 638 639 fault = check_bytes(end - remainder, POISON_INUSE, remainder); 640 if (!fault) 641 return 1; 642 while (end > fault && end[-1] == POISON_INUSE) 643 end--; 644 645 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 646 print_section("Padding", end - remainder, remainder); 647 648 restore_bytes(s, "slab padding", POISON_INUSE, start, end); 649 return 0; 650} 651 652static int check_object(struct kmem_cache *s, struct page *page, 653 void *object, int active) 654{ 655 u8 *p = object; 656 u8 *endobject = object + s->objsize; 657 658 if (s->flags & SLAB_RED_ZONE) { 659 unsigned int red = 660 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; 661 662 if (!check_bytes_and_report(s, page, object, "Redzone", 663 endobject, red, s->inuse - s->objsize)) 664 return 0; 665 } else { 666 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { 667 check_bytes_and_report(s, page, p, "Alignment padding", 668 endobject, POISON_INUSE, s->inuse - s->objsize); 669 } 670 } 671 672 if (s->flags & SLAB_POISON) { 673 if (!active && (s->flags & __OBJECT_POISON) && 674 (!check_bytes_and_report(s, page, p, "Poison", p, 675 POISON_FREE, s->objsize - 1) || 676 !check_bytes_and_report(s, page, p, "Poison", 677 p + s->objsize - 1, POISON_END, 1))) 678 return 0; 679 /* 680 * check_pad_bytes cleans up on its own. 681 */ 682 check_pad_bytes(s, page, p); 683 } 684 685 if (!s->offset && active) 686 /* 687 * Object and freepointer overlap. Cannot check 688 * freepointer while object is allocated. 689 */ 690 return 1; 691 692 /* Check free pointer validity */ 693 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 694 object_err(s, page, p, "Freepointer corrupt"); 695 /* 696 * No choice but to zap it and thus loose the remainder 697 * of the free objects in this slab. 
May cause 698 * another error because the object count is now wrong. 699 */ 700 set_freepointer(s, p, NULL); 701 return 0; 702 } 703 return 1; 704} 705 706static int check_slab(struct kmem_cache *s, struct page *page) 707{ 708 int maxobj; 709 710 VM_BUG_ON(!irqs_disabled()); 711 712 if (!PageSlab(page)) { 713 slab_err(s, page, "Not a valid slab page"); 714 return 0; 715 } 716 717 maxobj = (PAGE_SIZE << compound_order(page)) / s->size; 718 if (page->objects > maxobj) { 719 slab_err(s, page, "objects %u > max %u", 720 s->name, page->objects, maxobj); 721 return 0; 722 } 723 if (page->inuse > page->objects) { 724 slab_err(s, page, "inuse %u > max %u", 725 s->name, page->inuse, page->objects); 726 return 0; 727 } 728 /* Slab_pad_check fixes things up after itself */ 729 slab_pad_check(s, page); 730 return 1; 731} 732 733/* 734 * Determine if a certain object on a page is on the freelist. Must hold the 735 * slab lock to guarantee that the chains are in a consistent state. 736 */ 737static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 738{ 739 int nr = 0; 740 void *fp = page->freelist; 741 void *object = NULL; 742 unsigned long max_objects; 743 744 while (fp && nr <= page->objects) { 745 if (fp == search) 746 return 1; 747 if (!check_valid_pointer(s, page, fp)) { 748 if (object) { 749 object_err(s, page, object, 750 "Freechain corrupt"); 751 set_freepointer(s, object, NULL); 752 break; 753 } else { 754 slab_err(s, page, "Freepointer corrupt"); 755 page->freelist = NULL; 756 page->inuse = page->objects; 757 slab_fix(s, "Freelist cleared"); 758 return 0; 759 } 760 break; 761 } 762 object = fp; 763 fp = get_freepointer(s, object); 764 nr++; 765 } 766 767 max_objects = (PAGE_SIZE << compound_order(page)) / s->size; 768 if (max_objects > 65535) 769 max_objects = 65535; 770 771 if (page->objects != max_objects) { 772 slab_err(s, page, "Wrong number of objects. Found %d but " 773 "should be %d", page->objects, max_objects); 774 page->objects = max_objects; 775 slab_fix(s, "Number of objects adjusted."); 776 } 777 if (page->inuse != page->objects - nr) { 778 slab_err(s, page, "Wrong object count. Counter is %d but " 779 "counted were %d", page->inuse, page->objects - nr); 780 page->inuse = page->objects - nr; 781 slab_fix(s, "Object count adjusted."); 782 } 783 return search == NULL; 784} 785 786static void trace(struct kmem_cache *s, struct page *page, void *object, 787 int alloc) 788{ 789 if (s->flags & SLAB_TRACE) { 790 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 791 s->name, 792 alloc ? "alloc" : "free", 793 object, page->inuse, 794 page->freelist); 795 796 if (!alloc) 797 print_section("Object", (void *)object, s->objsize); 798 799 dump_stack(); 800 } 801} 802 803/* 804 * Tracking of fully allocated slabs for debugging purposes. 
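/*
 * Editor's illustration, not part of slub.c: the counter cross-check done by
 * on_freelist() above, in standalone form. Walking the chain of free objects
 * yields nr free objects, and the slab is consistent when
 * inuse == objects - nr. The real code additionally validates every pointer
 * against the page boundaries before following it and recomputes the object
 * count from the page order. The demo_* names and the free pointer offset of
 * 0 are assumptions of this sketch.
 */
#include <stddef.h>

struct demo_slab_counts {
        void *freelist;         /* head of the chain of free objects */
        unsigned int objects;   /* total objects in the slab */
        unsigned int inuse;     /* objects currently allocated */
};

static void *demo_next_free(void *object)
{
        return *(void **)object;        /* free pointer kept at offset 0 */
}

/* Return 0 if the counters match the freelist, -1 if inuse had to be fixed. */
static int demo_check_counters(struct demo_slab_counts *slab)
{
        unsigned int nr = 0;
        void *fp = slab->freelist;

        while (fp && nr <= slab->objects) {
                fp = demo_next_free(fp);
                nr++;
        }

        if (slab->inuse != slab->objects - nr) {
                slab->inuse = slab->objects - nr;  /* repair, as slab_fix() reports */
                return -1;
        }
        return 0;
}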
805 */ 806static void add_full(struct kmem_cache_node *n, struct page *page) 807{ 808 spin_lock(&n->list_lock); 809 list_add(&page->lru, &n->full); 810 spin_unlock(&n->list_lock); 811} 812 813static void remove_full(struct kmem_cache *s, struct page *page) 814{ 815 struct kmem_cache_node *n; 816 817 if (!(s->flags & SLAB_STORE_USER)) 818 return; 819 820 n = get_node(s, page_to_nid(page)); 821 822 spin_lock(&n->list_lock); 823 list_del(&page->lru); 824 spin_unlock(&n->list_lock); 825} 826 827/* Tracking of the number of slabs for debugging purposes */ 828static inline unsigned long slabs_node(struct kmem_cache *s, int node) 829{ 830 struct kmem_cache_node *n = get_node(s, node); 831 832 return atomic_long_read(&n->nr_slabs); 833} 834 835static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 836{ 837 struct kmem_cache_node *n = get_node(s, node); 838 839 /* 840 * May be called early in order to allocate a slab for the 841 * kmem_cache_node structure. Solve the chicken-egg 842 * dilemma by deferring the increment of the count during 843 * bootstrap (see early_kmem_cache_node_alloc). 844 */ 845 if (!NUMA_BUILD || n) { 846 atomic_long_inc(&n->nr_slabs); 847 atomic_long_add(objects, &n->total_objects); 848 } 849} 850static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 851{ 852 struct kmem_cache_node *n = get_node(s, node); 853 854 atomic_long_dec(&n->nr_slabs); 855 atomic_long_sub(objects, &n->total_objects); 856} 857 858/* Object debug checks for alloc/free paths */ 859static void setup_object_debug(struct kmem_cache *s, struct page *page, 860 void *object) 861{ 862 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 863 return; 864 865 init_object(s, object, 0); 866 init_tracking(s, object); 867} 868 869static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 870 void *object, void *addr) 871{ 872 if (!check_slab(s, page)) 873 goto bad; 874 875 if (!on_freelist(s, page, object)) { 876 object_err(s, page, object, "Object already allocated"); 877 goto bad; 878 } 879 880 if (!check_valid_pointer(s, page, object)) { 881 object_err(s, page, object, "Freelist Pointer check fails"); 882 goto bad; 883 } 884 885 if (!check_object(s, page, object, 0)) 886 goto bad; 887 888 /* Success perform special debug activities for allocs */ 889 if (s->flags & SLAB_STORE_USER) 890 set_track(s, object, TRACK_ALLOC, addr); 891 trace(s, page, object, 1); 892 init_object(s, object, 1); 893 return 1; 894 895bad: 896 if (PageSlab(page)) { 897 /* 898 * If this is a slab page then lets do the best we can 899 * to avoid issues in the future. Marking all objects 900 * as used avoids touching the remaining objects. 
901 */ 902 slab_fix(s, "Marking all objects used"); 903 page->inuse = page->objects; 904 page->freelist = NULL; 905 } 906 return 0; 907} 908 909static int free_debug_processing(struct kmem_cache *s, struct page *page, 910 void *object, void *addr) 911{ 912 if (!check_slab(s, page)) 913 goto fail; 914 915 if (!check_valid_pointer(s, page, object)) { 916 slab_err(s, page, "Invalid object pointer 0x%p", object); 917 goto fail; 918 } 919 920 if (on_freelist(s, page, object)) { 921 object_err(s, page, object, "Object already free"); 922 goto fail; 923 } 924 925 if (!check_object(s, page, object, 1)) 926 return 0; 927 928 if (unlikely(s != page->slab)) { 929 if (!PageSlab(page)) { 930 slab_err(s, page, "Attempt to free object(0x%p) " 931 "outside of slab", object); 932 } else if (!page->slab) { 933 printk(KERN_ERR 934 "SLUB <none>: no slab for object 0x%p.\n", 935 object); 936 dump_stack(); 937 } else 938 object_err(s, page, object, 939 "page slab pointer corrupt."); 940 goto fail; 941 } 942 943 /* Special debug activities for freeing objects */ 944 if (!PageSlubFrozen(page) && !page->freelist) 945 remove_full(s, page); 946 if (s->flags & SLAB_STORE_USER) 947 set_track(s, object, TRACK_FREE, addr); 948 trace(s, page, object, 0); 949 init_object(s, object, 0); 950 return 1; 951 952fail: 953 slab_fix(s, "Object at 0x%p not freed", object); 954 return 0; 955} 956 957static int __init setup_slub_debug(char *str) 958{ 959 slub_debug = DEBUG_DEFAULT_FLAGS; 960 if (*str++ != '=' || !*str) 961 /* 962 * No options specified. Switch on full debugging. 963 */ 964 goto out; 965 966 if (*str == ',') 967 /* 968 * No options but restriction on slabs. This means full 969 * debugging for slabs matching a pattern. 970 */ 971 goto check_slabs; 972 973 slub_debug = 0; 974 if (*str == '-') 975 /* 976 * Switch off all debugging measures. 977 */ 978 goto out; 979 980 /* 981 * Determine which debug features should be switched on 982 */ 983 for (; *str && *str != ','; str++) { 984 switch (tolower(*str)) { 985 case 'f': 986 slub_debug |= SLAB_DEBUG_FREE; 987 break; 988 case 'z': 989 slub_debug |= SLAB_RED_ZONE; 990 break; 991 case 'p': 992 slub_debug |= SLAB_POISON; 993 break; 994 case 'u': 995 slub_debug |= SLAB_STORE_USER; 996 break; 997 case 't': 998 slub_debug |= SLAB_TRACE; 999 break; 1000 default: 1001 printk(KERN_ERR "slub_debug option '%c' " 1002 "unknown. skipped\n", *str); 1003 } 1004 } 1005 1006check_slabs: 1007 if (*str == ',') 1008 slub_debug_slabs = str + 1; 1009out: 1010 return 1; 1011} 1012 1013__setup("slub_debug", setup_slub_debug); 1014 1015static unsigned long kmem_cache_flags(unsigned long objsize, 1016 unsigned long flags, const char *name, 1017 void (*ctor)(void *)) 1018{ 1019 /* 1020 * Enable debugging if selected on the kernel commandline. 
1021 */ 1022 if (slub_debug && (!slub_debug_slabs || 1023 strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0)) 1024 flags |= slub_debug; 1025 1026 return flags; 1027} 1028#else 1029static inline void setup_object_debug(struct kmem_cache *s, 1030 struct page *page, void *object) {} 1031 1032static inline int alloc_debug_processing(struct kmem_cache *s, 1033 struct page *page, void *object, void *addr) { return 0; } 1034 1035static inline int free_debug_processing(struct kmem_cache *s, 1036 struct page *page, void *object, void *addr) { return 0; } 1037 1038static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1039 { return 1; } 1040static inline int check_object(struct kmem_cache *s, struct page *page, 1041 void *object, int active) { return 1; } 1042static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1043static inline unsigned long kmem_cache_flags(unsigned long objsize, 1044 unsigned long flags, const char *name, 1045 void (*ctor)(void *)) 1046{ 1047 return flags; 1048} 1049#define slub_debug 0 1050 1051static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1052 { return 0; } 1053static inline void inc_slabs_node(struct kmem_cache *s, int node, 1054 int objects) {} 1055static inline void dec_slabs_node(struct kmem_cache *s, int node, 1056 int objects) {} 1057#endif 1058 1059/* 1060 * Slab allocation and freeing 1061 */ 1062static inline struct page *alloc_slab_page(gfp_t flags, int node, 1063 struct kmem_cache_order_objects oo) 1064{ 1065 int order = oo_order(oo); 1066 1067 if (node == -1) 1068 return alloc_pages(flags, order); 1069 else 1070 return alloc_pages_node(node, flags, order); 1071} 1072 1073static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1074{ 1075 struct page *page; 1076 struct kmem_cache_order_objects oo = s->oo; 1077 1078 flags |= s->allocflags; 1079 1080 page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node, 1081 oo); 1082 if (unlikely(!page)) { 1083 oo = s->min; 1084 /* 1085 * Allocation may have failed due to fragmentation. 1086 * Try a lower order alloc if possible 1087 */ 1088 page = alloc_slab_page(flags, node, oo); 1089 if (!page) 1090 return NULL; 1091 1092 stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK); 1093 } 1094 page->objects = oo_objects(oo); 1095 mod_zone_page_state(page_zone(page), 1096 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1097 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1098 1 << oo_order(oo)); 1099 1100 return page; 1101} 1102 1103static void setup_object(struct kmem_cache *s, struct page *page, 1104 void *object) 1105{ 1106 setup_object_debug(s, page, object); 1107 if (unlikely(s->ctor)) 1108 s->ctor(object); 1109} 1110 1111static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1112{ 1113 struct page *page; 1114 void *start; 1115 void *last; 1116 void *p; 1117 1118 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1119 1120 page = allocate_slab(s, 1121 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1122 if (!page) 1123 goto out; 1124 1125 inc_slabs_node(s, page_to_nid(page), page->objects); 1126 page->slab = s; 1127 page->flags |= 1 << PG_slab; 1128 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 1129 SLAB_STORE_USER | SLAB_TRACE)) 1130 __SetPageSlubDebug(page); 1131 1132 start = page_address(page); 1133 1134 if (unlikely(s->flags & SLAB_POISON)) 1135 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1136 1137 last = start; 1138 for_each_object(p, s, start, page->objects) { 1139 setup_object(s, page, last); 1140 set_freepointer(s, last, p); 1141 last = p; 1142 } 1143 setup_object(s, page, last); 1144 set_freepointer(s, last, NULL); 1145 1146 page->freelist = start; 1147 page->inuse = 0; 1148out: 1149 return page; 1150} 1151 1152static void __free_slab(struct kmem_cache *s, struct page *page) 1153{ 1154 int order = compound_order(page); 1155 int pages = 1 << order; 1156 1157 if (unlikely(SLABDEBUG && PageSlubDebug(page))) { 1158 void *p; 1159 1160 slab_pad_check(s, page); 1161 for_each_object(p, s, page_address(page), 1162 page->objects) 1163 check_object(s, page, p, 0); 1164 __ClearPageSlubDebug(page); 1165 } 1166 1167 mod_zone_page_state(page_zone(page), 1168 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1169 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1170 -pages); 1171 1172 __ClearPageSlab(page); 1173 reset_page_mapcount(page); 1174 __free_pages(page, order); 1175} 1176 1177static void rcu_free_slab(struct rcu_head *h) 1178{ 1179 struct page *page; 1180 1181 page = container_of((struct list_head *)h, struct page, lru); 1182 __free_slab(page->slab, page); 1183} 1184 1185static void free_slab(struct kmem_cache *s, struct page *page) 1186{ 1187 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1188 /* 1189 * RCU free overloads the RCU head over the LRU 1190 */ 1191 struct rcu_head *head = (void *)&page->lru; 1192 1193 call_rcu(head, rcu_free_slab); 1194 } else 1195 __free_slab(s, page); 1196} 1197 1198static void discard_slab(struct kmem_cache *s, struct page *page) 1199{ 1200 dec_slabs_node(s, page_to_nid(page), page->objects); 1201 free_slab(s, page); 1202} 1203 1204/* 1205 * Per slab locking using the pagelock 1206 */ 1207static __always_inline void slab_lock(struct page *page) 1208{ 1209 bit_spin_lock(PG_locked, &page->flags); 1210} 1211 1212static __always_inline void slab_unlock(struct page *page) 1213{ 1214 __bit_spin_unlock(PG_locked, &page->flags); 1215} 1216 1217static __always_inline int slab_trylock(struct page *page) 1218{ 1219 int rc = 1; 1220 1221 rc = bit_spin_trylock(PG_locked, &page->flags); 1222 return rc; 1223} 1224 1225/* 1226 * Management of partially allocated slabs 1227 */ 1228static void add_partial(struct kmem_cache_node *n, 1229 struct page *page, int tail) 1230{ 1231 spin_lock(&n->list_lock); 1232 n->nr_partial++; 1233 if (tail) 1234 list_add_tail(&page->lru, &n->partial); 1235 else 1236 list_add(&page->lru, &n->partial); 1237 spin_unlock(&n->list_lock); 1238} 1239 1240static void remove_partial(struct kmem_cache *s, struct page *page) 1241{ 1242 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1243 1244 spin_lock(&n->list_lock); 1245 list_del(&page->lru); 1246 n->nr_partial--; 1247 spin_unlock(&n->list_lock); 1248} 1249 1250/* 1251 * Lock slab and remove from the partial list. 1252 * 1253 * Must hold list_lock. 1254 */ 1255static inline int lock_and_freeze_slab(struct kmem_cache_node *n, 1256 struct page *page) 1257{ 1258 if (slab_trylock(page)) { 1259 list_del(&page->lru); 1260 n->nr_partial--; 1261 __SetPageSlubFrozen(page); 1262 return 1; 1263 } 1264 return 0; 1265} 1266 1267/* 1268 * Try to allocate a partial slab from a specific node. 1269 */ 1270static struct page *get_partial_node(struct kmem_cache_node *n) 1271{ 1272 struct page *page; 1273 1274 /* 1275 * Racy check. If we mistakenly see no partial slabs then we 1276 * just allocate an empty slab. If we mistakenly try to get a 1277 * partial slab and there is none available then get_partials() 1278 * will return NULL. 1279 */ 1280 if (!n || !n->nr_partial) 1281 return NULL; 1282 1283 spin_lock(&n->list_lock); 1284 list_for_each_entry(page, &n->partial, lru) 1285 if (lock_and_freeze_slab(n, page)) 1286 goto out; 1287 page = NULL; 1288out: 1289 spin_unlock(&n->list_lock); 1290 return page; 1291} 1292 1293/* 1294 * Get a page from somewhere. Search in increasing NUMA distances. 1295 */ 1296static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1297{ 1298#ifdef CONFIG_NUMA 1299 struct zonelist *zonelist; 1300 struct zoneref *z; 1301 struct zone *zone; 1302 enum zone_type high_zoneidx = gfp_zone(flags); 1303 struct page *page; 1304 1305 /* 1306 * The defrag ratio allows a configuration of the tradeoffs between 1307 * inter node defragmentation and node local allocations. 
A lower 1308 * defrag_ratio increases the tendency to do local allocations 1309 * instead of attempting to obtain partial slabs from other nodes. 1310 * 1311 * If the defrag_ratio is set to 0 then kmalloc() always 1312 * returns node local objects. If the ratio is higher then kmalloc() 1313 * may return off node objects because partial slabs are obtained 1314 * from other nodes and filled up. 1315 * 1316 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1317 * defrag_ratio = 1000) then every (well almost) allocation will 1318 * first attempt to defrag slab caches on other nodes. This means 1319 * scanning over all nodes to look for partial slabs which may be 1320 * expensive if we do it every time we are trying to find a slab 1321 * with available objects. 1322 */ 1323 if (!s->remote_node_defrag_ratio || 1324 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1325 return NULL; 1326 1327 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1328 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1329 struct kmem_cache_node *n; 1330 1331 n = get_node(s, zone_to_nid(zone)); 1332 1333 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1334 n->nr_partial > n->min_partial) { 1335 page = get_partial_node(n); 1336 if (page) 1337 return page; 1338 } 1339 } 1340#endif 1341 return NULL; 1342} 1343 1344/* 1345 * Get a partial page, lock it and return it. 1346 */ 1347static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1348{ 1349 struct page *page; 1350 int searchnode = (node == -1) ? numa_node_id() : node; 1351 1352 page = get_partial_node(get_node(s, searchnode)); 1353 if (page || (flags & __GFP_THISNODE)) 1354 return page; 1355 1356 return get_any_partial(s, flags); 1357} 1358 1359/* 1360 * Move a page back to the lists. 1361 * 1362 * Must be called with the slab lock held. 1363 * 1364 * On exit the slab lock will have been dropped. 1365 */ 1366static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1367{ 1368 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1369 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id()); 1370 1371 __ClearPageSlubFrozen(page); 1372 if (page->inuse) { 1373 1374 if (page->freelist) { 1375 add_partial(n, page, tail); 1376 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1377 } else { 1378 stat(c, DEACTIVATE_FULL); 1379 if (SLABDEBUG && PageSlubDebug(page) && 1380 (s->flags & SLAB_STORE_USER)) 1381 add_full(n, page); 1382 } 1383 slab_unlock(page); 1384 } else { 1385 stat(c, DEACTIVATE_EMPTY); 1386 if (n->nr_partial < n->min_partial) { 1387 /* 1388 * Adding an empty slab to the partial slabs in order 1389 * to avoid page allocator overhead. This slab needs 1390 * to come after the other slabs with objects in 1391 * so that the others get filled first. That way the 1392 * size of the partial list stays small. 1393 * 1394 * kmem_cache_shrink can reclaim any empty slabs from 1395 * the partial list. 1396 */ 1397 add_partial(n, page, 1); 1398 slab_unlock(page); 1399 } else { 1400 slab_unlock(page); 1401 stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB); 1402 discard_slab(s, page); 1403 } 1404 } 1405} 1406 1407/* 1408 * Remove the cpu slab 1409 */ 1410static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1411{ 1412 struct page *page = c->page; 1413 int tail = 1; 1414 1415 if (page->freelist) 1416 stat(c, DEACTIVATE_REMOTE_FREES); 1417 /* 1418 * Merge cpu freelist into slab freelist. Typically we get here 1419 * because both freelists are empty. 
So this is unlikely 1420 * to occur. 1421 */ 1422 while (unlikely(c->freelist)) { 1423 void **object; 1424 1425 tail = 0; /* Hot objects. Put the slab first */ 1426 1427 /* Retrieve object from cpu_freelist */ 1428 object = c->freelist; 1429 c->freelist = c->freelist[c->offset]; 1430 1431 /* And put onto the regular freelist */ 1432 object[c->offset] = page->freelist; 1433 page->freelist = object; 1434 page->inuse--; 1435 } 1436 c->page = NULL; 1437 unfreeze_slab(s, page, tail); 1438} 1439 1440static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1441{ 1442 stat(c, CPUSLAB_FLUSH); 1443 slab_lock(c->page); 1444 deactivate_slab(s, c); 1445} 1446 1447/* 1448 * Flush cpu slab. 1449 * 1450 * Called from IPI handler with interrupts disabled. 1451 */ 1452static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1453{ 1454 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 1455 1456 if (likely(c && c->page)) 1457 flush_slab(s, c); 1458} 1459 1460static void flush_cpu_slab(void *d) 1461{ 1462 struct kmem_cache *s = d; 1463 1464 __flush_cpu_slab(s, smp_processor_id()); 1465} 1466 1467static void flush_all(struct kmem_cache *s) 1468{ 1469 on_each_cpu(flush_cpu_slab, s, 1); 1470} 1471 1472/* 1473 * Check if the objects in a per cpu structure fit numa 1474 * locality expectations. 1475 */ 1476static inline int node_match(struct kmem_cache_cpu *c, int node) 1477{ 1478#ifdef CONFIG_NUMA 1479 if (node != -1 && c->node != node) 1480 return 0; 1481#endif 1482 return 1; 1483} 1484 1485/* 1486 * Slow path. The lockless freelist is empty or we need to perform 1487 * debugging duties. 1488 * 1489 * Interrupts are disabled. 1490 * 1491 * Processing is still very fast if new objects have been freed to the 1492 * regular freelist. In that case we simply take over the regular freelist 1493 * as the lockless freelist and zap the regular freelist. 1494 * 1495 * If that is not working then we fall back to the partial lists. We take the 1496 * first element of the freelist as the object to allocate now and move the 1497 * rest of the freelist to the lockless freelist. 1498 * 1499 * And if we were unable to get a new slab from the partial slab lists then 1500 * we need to allocate a new slab. This is the slowest path since it involves 1501 * a call to the page allocator and the setup of a new slab. 
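/*
 * Editor's illustration, not part of slub.c: the refill step of the slow path
 * described above, in standalone form. When the lockless freelist is empty,
 * __slab_alloc() below takes over the page's regular freelist wholesale: the
 * first object satisfies the current allocation, the remainder becomes the
 * per-cpu lockless freelist, and the page is accounted as fully in use so
 * that later frees from other CPUs rebuild page->freelist. The demo_* types
 * and the free pointer offset of 0 are assumptions of this sketch.
 */
#include <stddef.h>

struct demo_page {
        void *freelist;                 /* regular freelist, needs the slab lock */
        unsigned int objects;
        unsigned int inuse;
};

struct demo_cpu_slab {
        struct demo_page *page;         /* the per-cpu slab */
        void *freelist;                 /* lockless per-cpu freelist */
};

static void *demo_freelist_next(void *object)
{
        return *(void **)object;        /* free pointer kept at offset 0 */
}

/* Refill the cpu slab from its page and return one object, or NULL. */
static void *demo_refill_and_alloc(struct demo_cpu_slab *c)
{
        void *object = c->page->freelist;

        if (!object)
                return NULL;            /* page exhausted: deactivate, get another */

        c->freelist = demo_freelist_next(object);  /* rest becomes the cpu freelist */
        c->page->freelist = NULL;                  /* page list is now empty ...    */
        c->page->inuse = c->page->objects;         /* ... and counted as all in use */
        return object;
}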
1502 */ 1503static void *__slab_alloc(struct kmem_cache *s, 1504 gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c) 1505{ 1506 void **object; 1507 struct page *new; 1508 1509 /* We handle __GFP_ZERO in the caller */ 1510 gfpflags &= ~__GFP_ZERO; 1511 1512 if (!c->page) 1513 goto new_slab; 1514 1515 slab_lock(c->page); 1516 if (unlikely(!node_match(c, node))) 1517 goto another_slab; 1518 1519 stat(c, ALLOC_REFILL); 1520 1521load_freelist: 1522 object = c->page->freelist; 1523 if (unlikely(!object)) 1524 goto another_slab; 1525 if (unlikely(SLABDEBUG && PageSlubDebug(c->page))) 1526 goto debug; 1527 1528 c->freelist = object[c->offset]; 1529 c->page->inuse = c->page->objects; 1530 c->page->freelist = NULL; 1531 c->node = page_to_nid(c->page); 1532unlock_out: 1533 slab_unlock(c->page); 1534 stat(c, ALLOC_SLOWPATH); 1535 return object; 1536 1537another_slab: 1538 deactivate_slab(s, c); 1539 1540new_slab: 1541 new = get_partial(s, gfpflags, node); 1542 if (new) { 1543 c->page = new; 1544 stat(c, ALLOC_FROM_PARTIAL); 1545 goto load_freelist; 1546 } 1547 1548 if (gfpflags & __GFP_WAIT) 1549 local_irq_enable(); 1550 1551 new = new_slab(s, gfpflags, node); 1552 1553 if (gfpflags & __GFP_WAIT) 1554 local_irq_disable(); 1555 1556 if (new) { 1557 c = get_cpu_slab(s, smp_processor_id()); 1558 stat(c, ALLOC_SLAB); 1559 if (c->page) 1560 flush_slab(s, c); 1561 slab_lock(new); 1562 __SetPageSlubFrozen(new); 1563 c->page = new; 1564 goto load_freelist; 1565 } 1566 return NULL; 1567debug: 1568 if (!alloc_debug_processing(s, c->page, object, addr)) 1569 goto another_slab; 1570 1571 c->page->inuse++; 1572 c->page->freelist = object[c->offset]; 1573 c->node = -1; 1574 goto unlock_out; 1575} 1576 1577/* 1578 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 1579 * have the fastpath folded into their functions. So no function call 1580 * overhead for requests that can be satisfied on the fastpath. 1581 * 1582 * The fastpath works by first checking if the lockless freelist can be used. 1583 * If not then __slab_alloc is called for slow processing. 1584 * 1585 * Otherwise we can simply pick the next object from the lockless free list. 1586 */ 1587static __always_inline void *slab_alloc(struct kmem_cache *s, 1588 gfp_t gfpflags, int node, void *addr) 1589{ 1590 void **object; 1591 struct kmem_cache_cpu *c; 1592 unsigned long flags; 1593 unsigned int objsize; 1594 1595 if (should_failslab(s->objsize, gfpflags)) 1596 return NULL; 1597 1598 local_irq_save(flags); 1599 c = get_cpu_slab(s, smp_processor_id()); 1600 objsize = c->objsize; 1601 if (unlikely(!c->freelist || !node_match(c, node))) 1602 1603 object = __slab_alloc(s, gfpflags, node, addr, c); 1604 1605 else { 1606 object = c->freelist; 1607 c->freelist = object[c->offset]; 1608 stat(c, ALLOC_FASTPATH); 1609 } 1610 local_irq_restore(flags); 1611 1612 if (unlikely((gfpflags & __GFP_ZERO) && object)) 1613 memset(object, 0, objsize); 1614 1615 return object; 1616} 1617 1618void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 1619{ 1620 return slab_alloc(s, gfpflags, -1, __builtin_return_address(0)); 1621} 1622EXPORT_SYMBOL(kmem_cache_alloc); 1623 1624#ifdef CONFIG_NUMA 1625void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 1626{ 1627 return slab_alloc(s, gfpflags, node, __builtin_return_address(0)); 1628} 1629EXPORT_SYMBOL(kmem_cache_alloc_node); 1630#endif 1631 1632/* 1633 * Slow patch handling. 
This may still be called frequently since objects 1634 * have a longer lifetime than the cpu slabs in most processing loads. 1635 * 1636 * So we still attempt to reduce cache line usage. Just take the slab 1637 * lock and free the item. If there is no additional partial page 1638 * handling required then we can return immediately. 1639 */ 1640static void __slab_free(struct kmem_cache *s, struct page *page, 1641 void *x, void *addr, unsigned int offset) 1642{ 1643 void *prior; 1644 void **object = (void *)x; 1645 struct kmem_cache_cpu *c; 1646 1647 c = get_cpu_slab(s, raw_smp_processor_id()); 1648 stat(c, FREE_SLOWPATH); 1649 slab_lock(page); 1650 1651 if (unlikely(SLABDEBUG && PageSlubDebug(page))) 1652 goto debug; 1653 1654checks_ok: 1655 prior = object[offset] = page->freelist; 1656 page->freelist = object; 1657 page->inuse--; 1658 1659 if (unlikely(PageSlubFrozen(page))) { 1660 stat(c, FREE_FROZEN); 1661 goto out_unlock; 1662 } 1663 1664 if (unlikely(!page->inuse)) 1665 goto slab_empty; 1666 1667 /* 1668 * Objects left in the slab. If it was not on the partial list before 1669 * then add it. 1670 */ 1671 if (unlikely(!prior)) { 1672 add_partial(get_node(s, page_to_nid(page)), page, 1); 1673 stat(c, FREE_ADD_PARTIAL); 1674 } 1675 1676out_unlock: 1677 slab_unlock(page); 1678 return; 1679 1680slab_empty: 1681 if (prior) { 1682 /* 1683 * Slab still on the partial list. 1684 */ 1685 remove_partial(s, page); 1686 stat(c, FREE_REMOVE_PARTIAL); 1687 } 1688 slab_unlock(page); 1689 stat(c, FREE_SLAB); 1690 discard_slab(s, page); 1691 return; 1692 1693debug: 1694 if (!free_debug_processing(s, page, x, addr)) 1695 goto out_unlock; 1696 goto checks_ok; 1697} 1698 1699/* 1700 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 1701 * can perform fastpath freeing without additional function calls. 1702 * 1703 * The fastpath is only possible if we are freeing to the current cpu slab 1704 * of this processor. This typically the case if we have just allocated 1705 * the item before. 1706 * 1707 * If fastpath is not possible then fall back to __slab_free where we deal 1708 * with all sorts of special processing. 1709 */ 1710static __always_inline void slab_free(struct kmem_cache *s, 1711 struct page *page, void *x, void *addr) 1712{ 1713 void **object = (void *)x; 1714 struct kmem_cache_cpu *c; 1715 unsigned long flags; 1716 1717 local_irq_save(flags); 1718 c = get_cpu_slab(s, smp_processor_id()); 1719 debug_check_no_locks_freed(object, c->objsize); 1720 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1721 debug_check_no_obj_freed(object, s->objsize); 1722 if (likely(page == c->page && c->node >= 0)) { 1723 object[c->offset] = c->freelist; 1724 c->freelist = object; 1725 stat(c, FREE_FASTPATH); 1726 } else 1727 __slab_free(s, page, x, addr, c->offset); 1728 1729 local_irq_restore(flags); 1730} 1731 1732void kmem_cache_free(struct kmem_cache *s, void *x) 1733{ 1734 struct page *page; 1735 1736 page = virt_to_head_page(x); 1737 1738 slab_free(s, page, x, __builtin_return_address(0)); 1739} 1740EXPORT_SYMBOL(kmem_cache_free); 1741 1742/* Figure out on which slab object the object resides */ 1743static struct page *get_object_page(const void *x) 1744{ 1745 struct page *page = virt_to_head_page(x); 1746 1747 if (!PageSlab(page)) 1748 return NULL; 1749 1750 return page; 1751} 1752 1753/* 1754 * Object placement in a slab is made very easy because we always start at 1755 * offset 0. 
If we tune the size of the object to the alignment then we can 1756 * get the required alignment by putting one properly sized object after 1757 * another. 1758 * 1759 * Notice that the allocation order determines the sizes of the per cpu 1760 * caches. Each processor has always one slab available for allocations. 1761 * Increasing the allocation order reduces the number of times that slabs 1762 * must be moved on and off the partial lists and is therefore a factor in 1763 * locking overhead. 1764 */ 1765 1766/* 1767 * Mininum / Maximum order of slab pages. This influences locking overhead 1768 * and slab fragmentation. A higher order reduces the number of partial slabs 1769 * and increases the number of allocations possible without having to 1770 * take the list_lock. 1771 */ 1772static int slub_min_order; 1773static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; 1774static int slub_min_objects; 1775 1776/* 1777 * Merge control. If this is set then no merging of slab caches will occur. 1778 * (Could be removed. This was introduced to pacify the merge skeptics.) 1779 */ 1780static int slub_nomerge; 1781 1782/* 1783 * Calculate the order of allocation given an slab object size. 1784 * 1785 * The order of allocation has significant impact on performance and other 1786 * system components. Generally order 0 allocations should be preferred since 1787 * order 0 does not cause fragmentation in the page allocator. Larger objects 1788 * be problematic to put into order 0 slabs because there may be too much 1789 * unused space left. We go to a higher order if more than 1/16th of the slab 1790 * would be wasted. 1791 * 1792 * In order to reach satisfactory performance we must ensure that a minimum 1793 * number of objects is in one slab. Otherwise we may generate too much 1794 * activity on the partial lists which requires taking the list_lock. This is 1795 * less a concern for large slabs though which are rarely used. 1796 * 1797 * slub_max_order specifies the order where we begin to stop considering the 1798 * number of objects in a slab as critical. If we reach slub_max_order then 1799 * we try to keep the page order as low as possible. So we accept more waste 1800 * of space in favor of a small page order. 1801 * 1802 * Higher order allocations also allow the placement of more objects in a 1803 * slab and thereby reduce object handling overhead. If the user has 1804 * requested a higher mininum order then we start with that one instead of 1805 * the smallest order which will fit the object. 1806 */ 1807static inline int slab_order(int size, int min_objects, 1808 int max_order, int fract_leftover) 1809{ 1810 int order; 1811 int rem; 1812 int min_order = slub_min_order; 1813 1814 if ((PAGE_SIZE << min_order) / size > 65535) 1815 return get_order(size * 65535) - 1; 1816 1817 for (order = max(min_order, 1818 fls(min_objects * size - 1) - PAGE_SHIFT); 1819 order <= max_order; order++) { 1820 1821 unsigned long slab_size = PAGE_SIZE << order; 1822 1823 if (slab_size < min_objects * size) 1824 continue; 1825 1826 rem = slab_size % size; 1827 1828 if (rem <= slab_size / fract_leftover) 1829 break; 1830 1831 } 1832 1833 return order; 1834} 1835 1836static inline int calculate_order(int size) 1837{ 1838 int order; 1839 int min_objects; 1840 int fraction; 1841 1842 /* 1843 * Attempt to find best configuration for a slab. This 1844 * works by first attempting to generate a layout with 1845 * the best configuration and backing off gradually. 1846 * 1847 * First we reduce the acceptable waste in a slab. 
Then 1848 * we reduce the minimum objects required in a slab. 1849 */ 1850 min_objects = slub_min_objects; 1851 if (!min_objects) 1852 min_objects = 4 * (fls(nr_cpu_ids) + 1); 1853 while (min_objects > 1) { 1854 fraction = 16; 1855 while (fraction >= 4) { 1856 order = slab_order(size, min_objects, 1857 slub_max_order, fraction); 1858 if (order <= slub_max_order) 1859 return order; 1860 fraction /= 2; 1861 } 1862 min_objects /= 2; 1863 } 1864 1865 /* 1866 * We were unable to place multiple objects in a slab. Now 1867 * lets see if we can place a single object there. 1868 */ 1869 order = slab_order(size, 1, slub_max_order, 1); 1870 if (order <= slub_max_order) 1871 return order; 1872 1873 /* 1874 * Doh this slab cannot be placed using slub_max_order. 1875 */ 1876 order = slab_order(size, 1, MAX_ORDER, 1); 1877 if (order <= MAX_ORDER) 1878 return order; 1879 return -ENOSYS; 1880} 1881 1882/* 1883 * Figure out what the alignment of the objects will be. 1884 */ 1885static unsigned long calculate_alignment(unsigned long flags, 1886 unsigned long align, unsigned long size) 1887{ 1888 /* 1889 * If the user wants hardware cache aligned objects then follow that 1890 * suggestion if the object is sufficiently large. 1891 * 1892 * The hardware cache alignment cannot override the specified 1893 * alignment though. If that is greater then use it. 1894 */ 1895 if (flags & SLAB_HWCACHE_ALIGN) { 1896 unsigned long ralign = cache_line_size(); 1897 while (size <= ralign / 2) 1898 ralign /= 2; 1899 align = max(align, ralign); 1900 } 1901 1902 if (align < ARCH_SLAB_MINALIGN) 1903 align = ARCH_SLAB_MINALIGN; 1904 1905 return ALIGN(align, sizeof(void *)); 1906} 1907 1908static void init_kmem_cache_cpu(struct kmem_cache *s, 1909 struct kmem_cache_cpu *c) 1910{ 1911 c->page = NULL; 1912 c->freelist = NULL; 1913 c->node = 0; 1914 c->offset = s->offset / sizeof(void *); 1915 c->objsize = s->objsize; 1916#ifdef CONFIG_SLUB_STATS 1917 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned)); 1918#endif 1919} 1920 1921static void 1922init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 1923{ 1924 n->nr_partial = 0; 1925 1926 /* 1927 * The larger the object size is, the more pages we want on the partial 1928 * list to avoid pounding the page allocator excessively. 1929 */ 1930 n->min_partial = ilog2(s->size); 1931 if (n->min_partial < MIN_PARTIAL) 1932 n->min_partial = MIN_PARTIAL; 1933 else if (n->min_partial > MAX_PARTIAL) 1934 n->min_partial = MAX_PARTIAL; 1935 1936 spin_lock_init(&n->list_lock); 1937 INIT_LIST_HEAD(&n->partial); 1938#ifdef CONFIG_SLUB_DEBUG 1939 atomic_long_set(&n->nr_slabs, 0); 1940 atomic_long_set(&n->total_objects, 0); 1941 INIT_LIST_HEAD(&n->full); 1942#endif 1943} 1944 1945#ifdef CONFIG_SMP 1946/* 1947 * Per cpu array for per cpu structures. 1948 * 1949 * The per cpu array places all kmem_cache_cpu structures from one processor 1950 * close together meaning that it becomes possible that multiple per cpu 1951 * structures are contained in one cacheline. This may be particularly 1952 * beneficial for the kmalloc caches. 1953 * 1954 * A desktop system typically has around 60-80 slabs. With 100 here we are 1955 * likely able to get per cpu structures for all caches from the array defined 1956 * here. We must be able to cover all kmalloc caches during bootstrap. 1957 * 1958 * If the per cpu array is exhausted then fall back to kmalloc 1959 * of individual cachelines. No sharing is possible then. 
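/*
 * Editor's illustration, not part of slub.c: the bootstrap pool described
 * above, in standalone form. The statically reserved array is turned into a
 * simple allocator by threading its unused entries into a singly linked list
 * through one of their own fields (alloc_kmem_cache_cpu() and
 * free_kmem_cache_cpu() below reuse ->freelist for this); getting and
 * returning an entry is then a list pop and push, and a NULL result means the
 * caller must fall back to a real allocation, as the kernel falls back to
 * kmalloc_node(). The demo_* names and the entry layout are assumptions of
 * this sketch.
 */
#include <stddef.h>

#define DEMO_NR_ENTRIES 100

struct demo_entry {
        void *freelist;                 /* doubles as the "next free entry" link */
        /* ... remaining per-cpu fields ... */
};

static struct demo_entry demo_pool[DEMO_NR_ENTRIES];
static struct demo_entry *demo_pool_free;

static void demo_pool_init(void)
{
        int i;

        for (i = DEMO_NR_ENTRIES - 1; i >= 0; i--) {
                demo_pool[i].freelist = demo_pool_free;
                demo_pool_free = &demo_pool[i];
        }
}

static struct demo_entry *demo_pool_get(void)
{
        struct demo_entry *e = demo_pool_free;

        if (e)
                demo_pool_free = e->freelist;
        return e;                       /* NULL: pool exhausted, allocate instead */
}

static void demo_pool_put(struct demo_entry *e)
{
        e->freelist = demo_pool_free;
        demo_pool_free = e;
}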
1960 */ 1961#define NR_KMEM_CACHE_CPU 100 1962 1963static DEFINE_PER_CPU(struct kmem_cache_cpu, 1964 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 1965 1966static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 1967static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE; 1968 1969static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 1970 int cpu, gfp_t flags) 1971{ 1972 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu); 1973 1974 if (c) 1975 per_cpu(kmem_cache_cpu_free, cpu) = 1976 (void *)c->freelist; 1977 else { 1978 /* Table overflow: So allocate ourselves */ 1979 c = kmalloc_node( 1980 ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()), 1981 flags, cpu_to_node(cpu)); 1982 if (!c) 1983 return NULL; 1984 } 1985 1986 init_kmem_cache_cpu(s, c); 1987 return c; 1988} 1989 1990static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) 1991{ 1992 if (c < per_cpu(kmem_cache_cpu, cpu) || 1993 c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { 1994 kfree(c); 1995 return; 1996 } 1997 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu); 1998 per_cpu(kmem_cache_cpu_free, cpu) = c; 1999} 2000 2001static void free_kmem_cache_cpus(struct kmem_cache *s) 2002{ 2003 int cpu; 2004 2005 for_each_online_cpu(cpu) { 2006 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2007 2008 if (c) { 2009 s->cpu_slab[cpu] = NULL; 2010 free_kmem_cache_cpu(c, cpu); 2011 } 2012 } 2013} 2014 2015static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2016{ 2017 int cpu; 2018 2019 for_each_online_cpu(cpu) { 2020 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2021 2022 if (c) 2023 continue; 2024 2025 c = alloc_kmem_cache_cpu(s, cpu, flags); 2026 if (!c) { 2027 free_kmem_cache_cpus(s); 2028 return 0; 2029 } 2030 s->cpu_slab[cpu] = c; 2031 } 2032 return 1; 2033} 2034 2035/* 2036 * Initialize the per cpu array. 2037 */ 2038static void init_alloc_cpu_cpu(int cpu) 2039{ 2040 int i; 2041 2042 if (cpu_isset(cpu, kmem_cach_cpu_free_init_once)) 2043 return; 2044 2045 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2046 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2047 2048 cpu_set(cpu, kmem_cach_cpu_free_init_once); 2049} 2050 2051static void __init init_alloc_cpu(void) 2052{ 2053 int cpu; 2054 2055 for_each_online_cpu(cpu) 2056 init_alloc_cpu_cpu(cpu); 2057 } 2058 2059#else 2060static inline void free_kmem_cache_cpus(struct kmem_cache *s) {} 2061static inline void init_alloc_cpu(void) {} 2062 2063static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2064{ 2065 init_kmem_cache_cpu(s, &s->cpu_slab); 2066 return 1; 2067} 2068#endif 2069 2070#ifdef CONFIG_NUMA 2071/* 2072 * No kmalloc_node yet so do it by hand. We know that this is the first 2073 * slab on the node for this slabcache. There are no concurrent accesses 2074 * possible. 2075 * 2076 * Note that this function only works on the kmalloc_node_cache 2077 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2078 * memory on a fresh node that has no slab structures yet. 
2079 */ 2080static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, 2081 int node) 2082{ 2083 struct page *page; 2084 struct kmem_cache_node *n; 2085 unsigned long flags; 2086 2087 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); 2088 2089 page = new_slab(kmalloc_caches, gfpflags, node); 2090 2091 BUG_ON(!page); 2092 if (page_to_nid(page) != node) { 2093 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2094 "node %d\n", node); 2095 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2096 "in order to be able to continue\n"); 2097 } 2098 2099 n = page->freelist; 2100 BUG_ON(!n); 2101 page->freelist = get_freepointer(kmalloc_caches, n); 2102 page->inuse++; 2103 kmalloc_caches->node[node] = n; 2104#ifdef CONFIG_SLUB_DEBUG 2105 init_object(kmalloc_caches, n, 1); 2106 init_tracking(kmalloc_caches, n); 2107#endif 2108 init_kmem_cache_node(n, kmalloc_caches); 2109 inc_slabs_node(kmalloc_caches, node, page->objects); 2110 2111 /* 2112 * lockdep requires consistent irq usage for each lock 2113 * so even though there cannot be a race this early in 2114 * the boot sequence, we still disable irqs. 2115 */ 2116 local_irq_save(flags); 2117 add_partial(n, page, 0); 2118 local_irq_restore(flags); 2119 return n; 2120} 2121 2122static void free_kmem_cache_nodes(struct kmem_cache *s) 2123{ 2124 int node; 2125 2126 for_each_node_state(node, N_NORMAL_MEMORY) { 2127 struct kmem_cache_node *n = s->node[node]; 2128 if (n && n != &s->local_node) 2129 kmem_cache_free(kmalloc_caches, n); 2130 s->node[node] = NULL; 2131 } 2132} 2133 2134static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2135{ 2136 int node; 2137 int local_node; 2138 2139 if (slab_state >= UP) 2140 local_node = page_to_nid(virt_to_page(s)); 2141 else 2142 local_node = 0; 2143 2144 for_each_node_state(node, N_NORMAL_MEMORY) { 2145 struct kmem_cache_node *n; 2146 2147 if (local_node == node) 2148 n = &s->local_node; 2149 else { 2150 if (slab_state == DOWN) { 2151 n = early_kmem_cache_node_alloc(gfpflags, 2152 node); 2153 continue; 2154 } 2155 n = kmem_cache_alloc_node(kmalloc_caches, 2156 gfpflags, node); 2157 2158 if (!n) { 2159 free_kmem_cache_nodes(s); 2160 return 0; 2161 } 2162 2163 } 2164 s->node[node] = n; 2165 init_kmem_cache_node(n, s); 2166 } 2167 return 1; 2168} 2169#else 2170static void free_kmem_cache_nodes(struct kmem_cache *s) 2171{ 2172} 2173 2174static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2175{ 2176 init_kmem_cache_node(&s->local_node, s); 2177 return 1; 2178} 2179#endif 2180 2181/* 2182 * calculate_sizes() determines the order and the distribution of data within 2183 * a slab object. 2184 */ 2185static int calculate_sizes(struct kmem_cache *s, int forced_order) 2186{ 2187 unsigned long flags = s->flags; 2188 unsigned long size = s->objsize; 2189 unsigned long align = s->align; 2190 int order; 2191 2192 /* 2193 * Round up object size to the next word boundary. We can only 2194 * place the free pointer at word boundaries and this determines 2195 * the possible location of the free pointer. 2196 */ 2197 size = ALIGN(size, sizeof(void *)); 2198 2199#ifdef CONFIG_SLUB_DEBUG 2200 /* 2201 * Determine if we can poison the object itself. If the user of 2202 * the slab may touch the object after free or before allocation 2203 * then we should never poison the object itself. 
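 *
 * Example (for illustration only): a cache created with
 * SLAB_DESTROY_BY_RCU may still be read by lockless readers after
 * kmem_cache_free(), and a cache with a constructor expects the
 * ctor-initialized contents to survive a free/alloc cycle, so in both
 * cases __OBJECT_POISON is left clear below.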
2204 */ 2205 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2206 !s->ctor) 2207 s->flags |= __OBJECT_POISON; 2208 else 2209 s->flags &= ~__OBJECT_POISON; 2210 2211 2212 /* 2213 * If we are redzoning, check whether there is some space between the 2214 * end of the object and the free pointer. If not, add an 2215 * additional word to have some bytes in which to store redzone information. 2216 */ 2217 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2218 size += sizeof(void *); 2219#endif 2220 2221 /* 2222 * With that we have determined the number of bytes in actual use 2223 * by the object. This is the potential offset to the free pointer. 2224 */ 2225 s->inuse = size; 2226 2227 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2228 s->ctor)) { 2229 /* 2230 * Relocate free pointer after the object if it is not 2231 * permitted to overwrite the first word of the object on 2232 * kmem_cache_free. 2233 * 2234 * This is the case if we use RCU, have a constructor or 2235 * are poisoning the objects. 2236 */ 2237 s->offset = size; 2238 size += sizeof(void *); 2239 } 2240 2241#ifdef CONFIG_SLUB_DEBUG 2242 if (flags & SLAB_STORE_USER) 2243 /* 2244 * Need to store information about allocs and frees after 2245 * the object. 2246 */ 2247 size += 2 * sizeof(struct track); 2248 2249 if (flags & SLAB_RED_ZONE) 2250 /* 2251 * Add some empty padding so that we can catch 2252 * overwrites from earlier objects rather than let 2253 * tracking information or the free pointer be 2254 * corrupted if a user writes before the start 2255 * of the object. 2256 */ 2257 size += sizeof(void *); 2258#endif 2259 2260 /* 2261 * Determine the alignment based on various parameters that the 2262 * user specified and the dynamic determination of cache line size 2263 * on bootup. 2264 */ 2265 align = calculate_alignment(flags, align, s->objsize); 2266 2267 /* 2268 * SLUB stores one object immediately after another beginning from 2269 * offset 0. In order to align the objects we have to simply size 2270 * each object to conform to the alignment.
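 *
 * Worked example with purely illustrative numbers (64 bit build, 24 byte
 * struct track assumed): objsize = 56 with SLAB_RED_ZONE, SLAB_POISON
 * and SLAB_STORE_USER gives
 *
 *	56 + 8 (redzone word)              -> inuse = 64
 *	   + 8 (relocated free pointer)    -> offset = 64, size = 72
 *	   + 2 * 24 (alloc/free tracks)    -> 120
 *	   + 8 (padding before the object) -> 128
 *
 * so s->size becomes 128 and consecutive objects start at multiples
 * of 128 within the slab.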
2271 */ 2272 size = ALIGN(size, align); 2273 s->size = size; 2274 if (forced_order >= 0) 2275 order = forced_order; 2276 else 2277 order = calculate_order(size); 2278 2279 if (order < 0) 2280 return 0; 2281 2282 s->allocflags = 0; 2283 if (order) 2284 s->allocflags |= __GFP_COMP; 2285 2286 if (s->flags & SLAB_CACHE_DMA) 2287 s->allocflags |= SLUB_DMA; 2288 2289 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2290 s->allocflags |= __GFP_RECLAIMABLE; 2291 2292 /* 2293 * Determine the number of objects per slab 2294 */ 2295 s->oo = oo_make(order, size); 2296 s->min = oo_make(get_order(size), size); 2297 if (oo_objects(s->oo) > oo_objects(s->max)) 2298 s->max = s->oo; 2299 2300 return !!oo_objects(s->oo); 2301 2302} 2303 2304static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 2305 const char *name, size_t size, 2306 size_t align, unsigned long flags, 2307 void (*ctor)(void *)) 2308{ 2309 memset(s, 0, kmem_size); 2310 s->name = name; 2311 s->ctor = ctor; 2312 s->objsize = size; 2313 s->align = align; 2314 s->flags = kmem_cache_flags(size, flags, name, ctor); 2315 2316 if (!calculate_sizes(s, -1)) 2317 goto error; 2318 2319 s->refcount = 1; 2320#ifdef CONFIG_NUMA 2321 s->remote_node_defrag_ratio = 1000; 2322#endif 2323 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) 2324 goto error; 2325 2326 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) 2327 return 1; 2328 free_kmem_cache_nodes(s); 2329error: 2330 if (flags & SLAB_PANIC) 2331 panic("Cannot create slab %s size=%lu realsize=%u " 2332 "order=%u offset=%u flags=%lx\n", 2333 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2334 s->offset, flags); 2335 return 0; 2336} 2337 2338/* 2339 * Check if a given pointer is valid 2340 */ 2341int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2342{ 2343 struct page *page; 2344 2345 page = get_object_page(object); 2346 2347 if (!page || s != page->slab) 2348 /* No slab or wrong slab */ 2349 return 0; 2350 2351 if (!check_valid_pointer(s, page, object)) 2352 return 0; 2353 2354 /* 2355 * We could also check if the object is on the slabs freelist. 2356 * But this would be too expensive and it seems that the main 2357 * purpose of kmem_ptr_valid() is to check if the object belongs 2358 * to a certain slab. 2359 */ 2360 return 1; 2361} 2362EXPORT_SYMBOL(kmem_ptr_validate); 2363 2364/* 2365 * Determine the size of a slab object 2366 */ 2367unsigned int kmem_cache_size(struct kmem_cache *s) 2368{ 2369 return s->objsize; 2370} 2371EXPORT_SYMBOL(kmem_cache_size); 2372 2373const char *kmem_cache_name(struct kmem_cache *s) 2374{ 2375 return s->name; 2376} 2377EXPORT_SYMBOL(kmem_cache_name); 2378 2379static void list_slab_objects(struct kmem_cache *s, struct page *page, 2380 const char *text) 2381{ 2382#ifdef CONFIG_SLUB_DEBUG 2383 void *addr = page_address(page); 2384 void *p; 2385 DECLARE_BITMAP(map, page->objects); 2386 2387 bitmap_zero(map, page->objects); 2388 slab_err(s, page, "%s", text); 2389 slab_lock(page); 2390 for_each_free_object(p, s, page->freelist) 2391 set_bit(slab_index(p, s, addr), map); 2392 2393 for_each_object(p, s, addr, page->objects) { 2394 2395 if (!test_bit(slab_index(p, s, addr), map)) { 2396 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2397 p, p - addr); 2398 print_tracking(s, p); 2399 } 2400 } 2401 slab_unlock(page); 2402#endif 2403} 2404 2405/* 2406 * Attempt to free all partial slabs on a node. 
2407 */ 2408static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2409{ 2410 unsigned long flags; 2411 struct page *page, *h; 2412 2413 spin_lock_irqsave(&n->list_lock, flags); 2414 list_for_each_entry_safe(page, h, &n->partial, lru) { 2415 if (!page->inuse) { 2416 list_del(&page->lru); 2417 discard_slab(s, page); 2418 n->nr_partial--; 2419 } else { 2420 list_slab_objects(s, page, 2421 "Objects remaining on kmem_cache_close()"); 2422 } 2423 } 2424 spin_unlock_irqrestore(&n->list_lock, flags); 2425} 2426 2427/* 2428 * Release all resources used by a slab cache. 2429 */ 2430static inline int kmem_cache_close(struct kmem_cache *s) 2431{ 2432 int node; 2433 2434 flush_all(s); 2435 2436 /* Attempt to free all objects */ 2437 free_kmem_cache_cpus(s); 2438 for_each_node_state(node, N_NORMAL_MEMORY) { 2439 struct kmem_cache_node *n = get_node(s, node); 2440 2441 free_partial(s, n); 2442 if (n->nr_partial || slabs_node(s, node)) 2443 return 1; 2444 } 2445 free_kmem_cache_nodes(s); 2446 return 0; 2447} 2448 2449/* 2450 * Close a cache and release the kmem_cache structure 2451 * (must be used for caches created using kmem_cache_create) 2452 */ 2453void kmem_cache_destroy(struct kmem_cache *s) 2454{ 2455 down_write(&slub_lock); 2456 s->refcount--; 2457 if (!s->refcount) { 2458 list_del(&s->list); 2459 up_write(&slub_lock); 2460 if (kmem_cache_close(s)) { 2461 printk(KERN_ERR "SLUB %s: %s called for cache that " 2462 "still has objects.\n", s->name, __func__); 2463 dump_stack(); 2464 } 2465 sysfs_slab_remove(s); 2466 } else 2467 up_write(&slub_lock); 2468} 2469EXPORT_SYMBOL(kmem_cache_destroy); 2470 2471/******************************************************************** 2472 * Kmalloc subsystem 2473 *******************************************************************/ 2474 2475struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned; 2476EXPORT_SYMBOL(kmalloc_caches); 2477 2478static int __init setup_slub_min_order(char *str) 2479{ 2480 get_option(&str, &slub_min_order); 2481 2482 return 1; 2483} 2484 2485__setup("slub_min_order=", setup_slub_min_order); 2486 2487static int __init setup_slub_max_order(char *str) 2488{ 2489 get_option(&str, &slub_max_order); 2490 2491 return 1; 2492} 2493 2494__setup("slub_max_order=", setup_slub_max_order); 2495 2496static int __init setup_slub_min_objects(char *str) 2497{ 2498 get_option(&str, &slub_min_objects); 2499 2500 return 1; 2501} 2502 2503__setup("slub_min_objects=", setup_slub_min_objects); 2504 2505static int __init setup_slub_nomerge(char *str) 2506{ 2507 slub_nomerge = 1; 2508 return 1; 2509} 2510 2511__setup("slub_nomerge", setup_slub_nomerge); 2512 2513static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2514 const char *name, int size, gfp_t gfp_flags) 2515{ 2516 unsigned int flags = 0; 2517 2518 if (gfp_flags & SLUB_DMA) 2519 flags = SLAB_CACHE_DMA; 2520 2521 down_write(&slub_lock); 2522 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2523 flags, NULL)) 2524 goto panic; 2525 2526 list_add(&s->list, &slab_caches); 2527 up_write(&slub_lock); 2528 if (sysfs_slab_add(s)) 2529 goto panic; 2530 return s; 2531 2532panic: 2533 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2534} 2535 2536#ifdef CONFIG_ZONE_DMA 2537static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1]; 2538 2539static void sysfs_add_func(struct work_struct *w) 2540{ 2541 struct kmem_cache *s; 2542 2543 down_write(&slub_lock); 2544 list_for_each_entry(s, &slab_caches, list) { 2545 if (s->flags & 
__SYSFS_ADD_DEFERRED) { 2546 s->flags &= ~__SYSFS_ADD_DEFERRED; 2547 sysfs_slab_add(s); 2548 } 2549 } 2550 up_write(&slub_lock); 2551} 2552 2553static DECLARE_WORK(sysfs_add_work, sysfs_add_func); 2554 2555static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) 2556{ 2557 struct kmem_cache *s; 2558 char *text; 2559 size_t realsize; 2560 2561 s = kmalloc_caches_dma[index]; 2562 if (s) 2563 return s; 2564 2565 /* Dynamically create dma cache */ 2566 if (flags & __GFP_WAIT) 2567 down_write(&slub_lock); 2568 else { 2569 if (!down_write_trylock(&slub_lock)) 2570 goto out; 2571 } 2572 2573 if (kmalloc_caches_dma[index]) 2574 goto unlock_out; 2575 2576 realsize = kmalloc_caches[index].objsize; 2577 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", 2578 (unsigned int)realsize); 2579 s = kmalloc(kmem_size, flags & ~SLUB_DMA); 2580 2581 if (!s || !text || !kmem_cache_open(s, flags, text, 2582 realsize, ARCH_KMALLOC_MINALIGN, 2583 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) { 2584 kfree(s); 2585 kfree(text); 2586 goto unlock_out; 2587 } 2588 2589 list_add(&s->list, &slab_caches); 2590 kmalloc_caches_dma[index] = s; 2591 2592 schedule_work(&sysfs_add_work); 2593 2594unlock_out: 2595 up_write(&slub_lock); 2596out: 2597 return kmalloc_caches_dma[index]; 2598} 2599#endif 2600 2601/* 2602 * Conversion table for small slabs sizes / 8 to the index in the 2603 * kmalloc array. This is necessary for slabs < 192 since we have non power 2604 * of two cache sizes there. The size of larger slabs can be determined using 2605 * fls. 2606 */ 2607static s8 size_index[24] = { 2608 3, /* 8 */ 2609 4, /* 16 */ 2610 5, /* 24 */ 2611 5, /* 32 */ 2612 6, /* 40 */ 2613 6, /* 48 */ 2614 6, /* 56 */ 2615 6, /* 64 */ 2616 1, /* 72 */ 2617 1, /* 80 */ 2618 1, /* 88 */ 2619 1, /* 96 */ 2620 7, /* 104 */ 2621 7, /* 112 */ 2622 7, /* 120 */ 2623 7, /* 128 */ 2624 2, /* 136 */ 2625 2, /* 144 */ 2626 2, /* 152 */ 2627 2, /* 160 */ 2628 2, /* 168 */ 2629 2, /* 176 */ 2630 2, /* 184 */ 2631 2 /* 192 */ 2632}; 2633 2634static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2635{ 2636 int index; 2637 2638 if (size <= 192) { 2639 if (!size) 2640 return ZERO_SIZE_PTR; 2641 2642 index = size_index[(size - 1) / 8]; 2643 } else 2644 index = fls(size - 1); 2645 2646#ifdef CONFIG_ZONE_DMA 2647 if (unlikely((flags & SLUB_DMA))) 2648 return dma_kmalloc_cache(index, flags); 2649 2650#endif 2651 return &kmalloc_caches[index]; 2652} 2653 2654void *__kmalloc(size_t size, gfp_t flags) 2655{ 2656 struct kmem_cache *s; 2657 2658 if (unlikely(size > PAGE_SIZE)) 2659 return kmalloc_large(size, flags); 2660 2661 s = get_slab(size, flags); 2662 2663 if (unlikely(ZERO_OR_NULL_PTR(s))) 2664 return s; 2665 2666 return slab_alloc(s, flags, -1, __builtin_return_address(0)); 2667} 2668EXPORT_SYMBOL(__kmalloc); 2669 2670static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2671{ 2672 struct page *page = alloc_pages_node(node, flags | __GFP_COMP, 2673 get_order(size)); 2674 2675 if (page) 2676 return page_address(page); 2677 else 2678 return NULL; 2679} 2680 2681#ifdef CONFIG_NUMA 2682void *__kmalloc_node(size_t size, gfp_t flags, int node) 2683{ 2684 struct kmem_cache *s; 2685 2686 if (unlikely(size > PAGE_SIZE)) 2687 return kmalloc_large_node(size, flags, node); 2688 2689 s = get_slab(size, flags); 2690 2691 if (unlikely(ZERO_OR_NULL_PTR(s))) 2692 return s; 2693 2694 return slab_alloc(s, flags, node, __builtin_return_address(0)); 2695} 2696EXPORT_SYMBOL(__kmalloc_node); 2697#endif 2698 2699size_t ksize(const void 
*object) 2700{ 2701 struct page *page; 2702 struct kmem_cache *s; 2703 2704 if (unlikely(object == ZERO_SIZE_PTR)) 2705 return 0; 2706 2707 page = virt_to_head_page(object); 2708 2709 if (unlikely(!PageSlab(page))) { 2710 WARN_ON(!PageCompound(page)); 2711 return PAGE_SIZE << compound_order(page); 2712 } 2713 s = page->slab; 2714 2715#ifdef CONFIG_SLUB_DEBUG 2716 /* 2717 * Debugging requires use of the padding between object 2718 * and whatever may come after it. 2719 */ 2720 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2721 return s->objsize; 2722 2723#endif 2724 /* 2725 * If we have the need to store the freelist pointer 2726 * back there or track user information then we can 2727 * only use the space before that information. 2728 */ 2729 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2730 return s->inuse; 2731 /* 2732 * Else we can use all the padding etc for the allocation 2733 */ 2734 return s->size; 2735} 2736 2737void kfree(const void *x) 2738{ 2739 struct page *page; 2740 void *object = (void *)x; 2741 2742 if (unlikely(ZERO_OR_NULL_PTR(x))) 2743 return; 2744 2745 page = virt_to_head_page(x); 2746 if (unlikely(!PageSlab(page))) { 2747 BUG_ON(!PageCompound(page)); 2748 put_page(page); 2749 return; 2750 } 2751 slab_free(page->slab, page, object, __builtin_return_address(0)); 2752} 2753EXPORT_SYMBOL(kfree); 2754 2755/* 2756 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2757 * the remaining slabs by the number of items in use. The slabs with the 2758 * most items in use come first. New allocations will then fill those up 2759 * and thus they can be removed from the partial lists. 2760 * 2761 * The slabs with the least items are placed last. This results in them 2762 * being allocated from last increasing the chance that the last objects 2763 * are freed in them. 2764 */ 2765int kmem_cache_shrink(struct kmem_cache *s) 2766{ 2767 int node; 2768 int i; 2769 struct kmem_cache_node *n; 2770 struct page *page; 2771 struct page *t; 2772 int objects = oo_objects(s->max); 2773 struct list_head *slabs_by_inuse = 2774 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2775 unsigned long flags; 2776 2777 if (!slabs_by_inuse) 2778 return -ENOMEM; 2779 2780 flush_all(s); 2781 for_each_node_state(node, N_NORMAL_MEMORY) { 2782 n = get_node(s, node); 2783 2784 if (!n->nr_partial) 2785 continue; 2786 2787 for (i = 0; i < objects; i++) 2788 INIT_LIST_HEAD(slabs_by_inuse + i); 2789 2790 spin_lock_irqsave(&n->list_lock, flags); 2791 2792 /* 2793 * Build lists indexed by the items in use in each slab. 2794 * 2795 * Note that concurrent frees may occur while we hold the 2796 * list_lock. page->inuse here is the upper limit. 2797 */ 2798 list_for_each_entry_safe(page, t, &n->partial, lru) { 2799 if (!page->inuse && slab_trylock(page)) { 2800 /* 2801 * Must hold slab lock here because slab_free 2802 * may have freed the last object and be 2803 * waiting to release the slab. 2804 */ 2805 list_del(&page->lru); 2806 n->nr_partial--; 2807 slab_unlock(page); 2808 discard_slab(s, page); 2809 } else { 2810 list_move(&page->lru, 2811 slabs_by_inuse + page->inuse); 2812 } 2813 } 2814 2815 /* 2816 * Rebuild the partial list with the slabs filled up most 2817 * first and the least used slabs at the end. 
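 *
 * For instance (sketch only): with oo_objects(s->max) == 4 and partial
 * slabs holding 3, 1 and 2 objects in use, the loop below splices
 * slabs_by_inuse[3], then [2], then [1] onto the list tail, so the
 * partial list ends up ordered 3 -> 2 -> 1: allocations refill the
 * fullest slabs first and the emptiest slab has the best chance of
 * eventually being discarded.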
2818 */ 2819 for (i = objects - 1; i >= 0; i--) 2820 list_splice(slabs_by_inuse + i, n->partial.prev); 2821 2822 spin_unlock_irqrestore(&n->list_lock, flags); 2823 } 2824 2825 kfree(slabs_by_inuse); 2826 return 0; 2827} 2828EXPORT_SYMBOL(kmem_cache_shrink); 2829 2830#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 2831static int slab_mem_going_offline_callback(void *arg) 2832{ 2833 struct kmem_cache *s; 2834 2835 down_read(&slub_lock); 2836 list_for_each_entry(s, &slab_caches, list) 2837 kmem_cache_shrink(s); 2838 up_read(&slub_lock); 2839 2840 return 0; 2841} 2842 2843static void slab_mem_offline_callback(void *arg) 2844{ 2845 struct kmem_cache_node *n; 2846 struct kmem_cache *s; 2847 struct memory_notify *marg = arg; 2848 int offline_node; 2849 2850 offline_node = marg->status_change_nid; 2851 2852 /* 2853 * If the node still has available memory, we still need its 2854 * kmem_cache_node, so there is nothing to tear down. 2855 */ 2856 if (offline_node < 0) 2857 return; 2858 2859 down_read(&slub_lock); 2860 list_for_each_entry(s, &slab_caches, list) { 2861 n = get_node(s, offline_node); 2862 if (n) { 2863 /* 2864 * If n->nr_slabs > 0, slabs still exist on the node 2865 * that is going down. We were unable to free them, 2866 * and the offline_pages() function shouldn't have called 2867 * this callback. So, we must fail. 2868 */ 2869 BUG_ON(slabs_node(s, offline_node)); 2870 2871 s->node[offline_node] = NULL; 2872 kmem_cache_free(kmalloc_caches, n); 2873 } 2874 } 2875 up_read(&slub_lock); 2876} 2877 2878static int slab_mem_going_online_callback(void *arg) 2879{ 2880 struct kmem_cache_node *n; 2881 struct kmem_cache *s; 2882 struct memory_notify *marg = arg; 2883 int nid = marg->status_change_nid; 2884 int ret = 0; 2885 2886 /* 2887 * If the node's memory is already available, then kmem_cache_node is 2888 * already created. Nothing to do. 2889 */ 2890 if (nid < 0) 2891 return 0; 2892 2893 /* 2894 * We are bringing a node online. No memory is available yet. We must 2895 * allocate a kmem_cache_node structure in order to bring the node 2896 * online. 2897 */ 2898 down_read(&slub_lock); 2899 list_for_each_entry(s, &slab_caches, list) { 2900 /* 2901 * XXX: kmem_cache_alloc_node will fall back to other nodes 2902 * since memory is not yet available from the node that 2903 * is brought up.
2904 */ 2905 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); 2906 if (!n) { 2907 ret = -ENOMEM; 2908 goto out; 2909 } 2910 init_kmem_cache_node(n, s); 2911 s->node[nid] = n; 2912 } 2913out: 2914 up_read(&slub_lock); 2915 return ret; 2916} 2917 2918static int slab_memory_callback(struct notifier_block *self, 2919 unsigned long action, void *arg) 2920{ 2921 int ret = 0; 2922 2923 switch (action) { 2924 case MEM_GOING_ONLINE: 2925 ret = slab_mem_going_online_callback(arg); 2926 break; 2927 case MEM_GOING_OFFLINE: 2928 ret = slab_mem_going_offline_callback(arg); 2929 break; 2930 case MEM_OFFLINE: 2931 case MEM_CANCEL_ONLINE: 2932 slab_mem_offline_callback(arg); 2933 break; 2934 case MEM_ONLINE: 2935 case MEM_CANCEL_OFFLINE: 2936 break; 2937 } 2938 if (ret) 2939 ret = notifier_from_errno(ret); 2940 else 2941 ret = NOTIFY_OK; 2942 return ret; 2943} 2944 2945#endif /* CONFIG_MEMORY_HOTPLUG */ 2946 2947/******************************************************************** 2948 * Basic setup of slabs 2949 *******************************************************************/ 2950 2951void __init kmem_cache_init(void) 2952{ 2953 int i; 2954 int caches = 0; 2955 2956 init_alloc_cpu(); 2957 2958#ifdef CONFIG_NUMA 2959 /* 2960 * Must first have the slab cache available for the allocations of the 2961 * struct kmem_cache_node's. There is special bootstrap code in 2962 * kmem_cache_open for slab_state == DOWN. 2963 */ 2964 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 2965 sizeof(struct kmem_cache_node), GFP_KERNEL); 2966 kmalloc_caches[0].refcount = -1; 2967 caches++; 2968 2969 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 2970#endif 2971 2972 /* Able to allocate the per node structures */ 2973 slab_state = PARTIAL; 2974 2975 /* Caches that are not of the two-to-the-power-of size */ 2976 if (KMALLOC_MIN_SIZE <= 64) { 2977 create_kmalloc_cache(&kmalloc_caches[1], 2978 "kmalloc-96", 96, GFP_KERNEL); 2979 caches++; 2980 create_kmalloc_cache(&kmalloc_caches[2], 2981 "kmalloc-192", 192, GFP_KERNEL); 2982 caches++; 2983 } 2984 2985 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) { 2986 create_kmalloc_cache(&kmalloc_caches[i], 2987 "kmalloc", 1 << i, GFP_KERNEL); 2988 caches++; 2989 } 2990 2991 2992 /* 2993 * Patch up the size_index table if we have strange large alignment 2994 * requirements for the kmalloc array. This is only the case for 2995 * MIPS it seems. The standard arches will not generate any code here. 2996 * 2997 * Largest permitted alignment is 256 bytes due to the way we 2998 * handle the index determination for the smaller caches. 2999 * 3000 * Make sure that nothing crazy happens if someone starts tinkering 3001 * around with ARCH_KMALLOC_MINALIGN 3002 */ 3003 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3004 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3005 3006 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) 3007 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW; 3008 3009 if (KMALLOC_MIN_SIZE == 128) { 3010 /* 3011 * The 192 byte sized cache is not used if the alignment 3012 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3013 * instead. 3014 */ 3015 for (i = 128 + 8; i <= 192; i += 8) 3016 size_index[(i - 1) / 8] = 8; 3017 } 3018 3019 slab_state = UP; 3020 3021 /* Provide the correct kmalloc names now that the caches are up */ 3022 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) 3023 kmalloc_caches[i]. 
name = 3024 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); 3025 3026#ifdef CONFIG_SMP 3027 register_cpu_notifier(&slab_notifier); 3028 kmem_size = offsetof(struct kmem_cache, cpu_slab) + 3029 nr_cpu_ids * sizeof(struct kmem_cache_cpu *); 3030#else 3031 kmem_size = sizeof(struct kmem_cache); 3032#endif 3033 3034 printk(KERN_INFO 3035 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3036 " CPUs=%d, Nodes=%d\n", 3037 caches, cache_line_size(), 3038 slub_min_order, slub_max_order, slub_min_objects, 3039 nr_cpu_ids, nr_node_ids); 3040} 3041 3042/* 3043 * Find a mergeable slab cache 3044 */ 3045static int slab_unmergeable(struct kmem_cache *s) 3046{ 3047 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3048 return 1; 3049 3050 if (s->ctor) 3051 return 1; 3052 3053 /* 3054 * We may have set a slab to be unmergeable during bootstrap. 3055 */ 3056 if (s->refcount < 0) 3057 return 1; 3058 3059 return 0; 3060} 3061 3062static struct kmem_cache *find_mergeable(size_t size, 3063 size_t align, unsigned long flags, const char *name, 3064 void (*ctor)(void *)) 3065{ 3066 struct kmem_cache *s; 3067 3068 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3069 return NULL; 3070 3071 if (ctor) 3072 return NULL; 3073 3074 size = ALIGN(size, sizeof(void *)); 3075 align = calculate_alignment(flags, align, size); 3076 size = ALIGN(size, align); 3077 flags = kmem_cache_flags(size, flags, name, NULL); 3078 3079 list_for_each_entry(s, &slab_caches, list) { 3080 if (slab_unmergeable(s)) 3081 continue; 3082 3083 if (size > s->size) 3084 continue; 3085 3086 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3087 continue; 3088 /* 3089 * Check if alignment is compatible. 3090 * Courtesy of Adrian Drzewiecki 3091 */ 3092 if ((s->size & ~(align - 1)) != s->size) 3093 continue; 3094 3095 if (s->size - size >= sizeof(void *)) 3096 continue; 3097 3098 return s; 3099 } 3100 return NULL; 3101} 3102 3103struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3104 size_t align, unsigned long flags, void (*ctor)(void *)) 3105{ 3106 struct kmem_cache *s; 3107 3108 down_write(&slub_lock); 3109 s = find_mergeable(size, align, flags, name, ctor); 3110 if (s) { 3111 int cpu; 3112 3113 s->refcount++; 3114 /* 3115 * Adjust the object sizes so that we clear 3116 * the complete object on kzalloc. 3117 */ 3118 s->objsize = max(s->objsize, (int)size); 3119 3120 /* 3121 * And then we need to update the object size in the 3122 * per cpu structures 3123 */ 3124 for_each_online_cpu(cpu) 3125 get_cpu_slab(s, cpu)->objsize = s->objsize; 3126 3127 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3128 up_write(&slub_lock); 3129 3130 if (sysfs_slab_alias(s, name)) 3131 goto err; 3132 return s; 3133 } 3134 3135 s = kmalloc(kmem_size, GFP_KERNEL); 3136 if (s) { 3137 if (kmem_cache_open(s, GFP_KERNEL, name, 3138 size, align, flags, ctor)) { 3139 list_add(&s->list, &slab_caches); 3140 up_write(&slub_lock); 3141 if (sysfs_slab_add(s)) 3142 goto err; 3143 return s; 3144 } 3145 kfree(s); 3146 } 3147 up_write(&slub_lock); 3148 3149err: 3150 if (flags & SLAB_PANIC) 3151 panic("Cannot create slabcache %s\n", name); 3152 else 3153 s = NULL; 3154 return s; 3155} 3156EXPORT_SYMBOL(kmem_cache_create); 3157 3158#ifdef CONFIG_SMP 3159/* 3160 * Use the cpu notifier to insure that the cpu slabs are flushed when 3161 * necessary. 
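 *
 * In short (summary of the callback below, not additional behaviour):
 * CPU_UP_PREPARE allocates a kmem_cache_cpu for every cache on the
 * incoming cpu, while CPU_DEAD / CPU_UP_CANCELED flushes that cpu's
 * slab with interrupts disabled and returns the structure through
 * free_kmem_cache_cpu().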
3162 */ 3163static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3164 unsigned long action, void *hcpu) 3165{ 3166 long cpu = (long)hcpu; 3167 struct kmem_cache *s; 3168 unsigned long flags; 3169 3170 switch (action) { 3171 case CPU_UP_PREPARE: 3172 case CPU_UP_PREPARE_FROZEN: 3173 init_alloc_cpu_cpu(cpu); 3174 down_read(&slub_lock); 3175 list_for_each_entry(s, &slab_caches, list) 3176 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu, 3177 GFP_KERNEL); 3178 up_read(&slub_lock); 3179 break; 3180 3181 case CPU_UP_CANCELED: 3182 case CPU_UP_CANCELED_FROZEN: 3183 case CPU_DEAD: 3184 case CPU_DEAD_FROZEN: 3185 down_read(&slub_lock); 3186 list_for_each_entry(s, &slab_caches, list) { 3187 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3188 3189 local_irq_save(flags); 3190 __flush_cpu_slab(s, cpu); 3191 local_irq_restore(flags); 3192 free_kmem_cache_cpu(c, cpu); 3193 s->cpu_slab[cpu] = NULL; 3194 } 3195 up_read(&slub_lock); 3196 break; 3197 default: 3198 break; 3199 } 3200 return NOTIFY_OK; 3201} 3202 3203static struct notifier_block __cpuinitdata slab_notifier = { 3204 .notifier_call = slab_cpuup_callback 3205}; 3206 3207#endif 3208 3209void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) 3210{ 3211 struct kmem_cache *s; 3212 3213 if (unlikely(size > PAGE_SIZE)) 3214 return kmalloc_large(size, gfpflags); 3215 3216 s = get_slab(size, gfpflags); 3217 3218 if (unlikely(ZERO_OR_NULL_PTR(s))) 3219 return s; 3220 3221 return slab_alloc(s, gfpflags, -1, caller); 3222} 3223 3224void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3225 int node, void *caller) 3226{ 3227 struct kmem_cache *s; 3228 3229 if (unlikely(size > PAGE_SIZE)) 3230 return kmalloc_large_node(size, gfpflags, node); 3231 3232 s = get_slab(size, gfpflags); 3233 3234 if (unlikely(ZERO_OR_NULL_PTR(s))) 3235 return s; 3236 3237 return slab_alloc(s, gfpflags, node, caller); 3238} 3239 3240#ifdef CONFIG_SLUB_DEBUG 3241static unsigned long count_partial(struct kmem_cache_node *n, 3242 int (*get_count)(struct page *)) 3243{ 3244 unsigned long flags; 3245 unsigned long x = 0; 3246 struct page *page; 3247 3248 spin_lock_irqsave(&n->list_lock, flags); 3249 list_for_each_entry(page, &n->partial, lru) 3250 x += get_count(page); 3251 spin_unlock_irqrestore(&n->list_lock, flags); 3252 return x; 3253} 3254 3255static int count_inuse(struct page *page) 3256{ 3257 return page->inuse; 3258} 3259 3260static int count_total(struct page *page) 3261{ 3262 return page->objects; 3263} 3264 3265static int count_free(struct page *page) 3266{ 3267 return page->objects - page->inuse; 3268} 3269 3270static int validate_slab(struct kmem_cache *s, struct page *page, 3271 unsigned long *map) 3272{ 3273 void *p; 3274 void *addr = page_address(page); 3275 3276 if (!check_slab(s, page) || 3277 !on_freelist(s, page, NULL)) 3278 return 0; 3279 3280 /* Now we know that a valid freelist exists */ 3281 bitmap_zero(map, page->objects); 3282 3283 for_each_free_object(p, s, page->freelist) { 3284 set_bit(slab_index(p, s, addr), map); 3285 if (!check_object(s, page, p, 0)) 3286 return 0; 3287 } 3288 3289 for_each_object(p, s, addr, page->objects) 3290 if (!test_bit(slab_index(p, s, addr), map)) 3291 if (!check_object(s, page, p, 1)) 3292 return 0; 3293 return 1; 3294} 3295 3296static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3297 unsigned long *map) 3298{ 3299 if (slab_trylock(page)) { 3300 validate_slab(s, page, map); 3301 slab_unlock(page); 3302 } else 3303 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 
3304 s->name, page); 3305 3306 if (s->flags & DEBUG_DEFAULT_FLAGS) { 3307 if (!PageSlubDebug(page)) 3308 printk(KERN_ERR "SLUB %s: SlubDebug not set " 3309 "on slab 0x%p\n", s->name, page); 3310 } else { 3311 if (PageSlubDebug(page)) 3312 printk(KERN_ERR "SLUB %s: SlubDebug set on " 3313 "slab 0x%p\n", s->name, page); 3314 } 3315} 3316 3317static int validate_slab_node(struct kmem_cache *s, 3318 struct kmem_cache_node *n, unsigned long *map) 3319{ 3320 unsigned long count = 0; 3321 struct page *page; 3322 unsigned long flags; 3323 3324 spin_lock_irqsave(&n->list_lock, flags); 3325 3326 list_for_each_entry(page, &n->partial, lru) { 3327 validate_slab_slab(s, page, map); 3328 count++; 3329 } 3330 if (count != n->nr_partial) 3331 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3332 "counter=%ld\n", s->name, count, n->nr_partial); 3333 3334 if (!(s->flags & SLAB_STORE_USER)) 3335 goto out; 3336 3337 list_for_each_entry(page, &n->full, lru) { 3338 validate_slab_slab(s, page, map); 3339 count++; 3340 } 3341 if (count != atomic_long_read(&n->nr_slabs)) 3342 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3343 "counter=%ld\n", s->name, count, 3344 atomic_long_read(&n->nr_slabs)); 3345 3346out: 3347 spin_unlock_irqrestore(&n->list_lock, flags); 3348 return count; 3349} 3350 3351static long validate_slab_cache(struct kmem_cache *s) 3352{ 3353 int node; 3354 unsigned long count = 0; 3355 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3356 sizeof(unsigned long), GFP_KERNEL); 3357 3358 if (!map) 3359 return -ENOMEM; 3360 3361 flush_all(s); 3362 for_each_node_state(node, N_NORMAL_MEMORY) { 3363 struct kmem_cache_node *n = get_node(s, node); 3364 3365 count += validate_slab_node(s, n, map); 3366 } 3367 kfree(map); 3368 return count; 3369} 3370 3371#ifdef SLUB_RESILIENCY_TEST 3372static void resiliency_test(void) 3373{ 3374 u8 *p; 3375 3376 printk(KERN_ERR "SLUB resiliency testing\n"); 3377 printk(KERN_ERR "-----------------------\n"); 3378 printk(KERN_ERR "A. Corruption after allocation\n"); 3379 3380 p = kzalloc(16, GFP_KERNEL); 3381 p[16] = 0x12; 3382 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 3383 " 0x12->0x%p\n\n", p + 16); 3384 3385 validate_slab_cache(kmalloc_caches + 4); 3386 3387 /* Hmmm... The next two are dangerous */ 3388 p = kzalloc(32, GFP_KERNEL); 3389 p[32 + sizeof(void *)] = 0x34; 3390 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 3391 " 0x34 -> -0x%p\n", p); 3392 printk(KERN_ERR 3393 "If allocated object is overwritten then not detectable\n\n"); 3394 3395 validate_slab_cache(kmalloc_caches + 5); 3396 p = kzalloc(64, GFP_KERNEL); 3397 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3398 *p = 0x56; 3399 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3400 p); 3401 printk(KERN_ERR 3402 "If allocated object is overwritten then not detectable\n\n"); 3403 validate_slab_cache(kmalloc_caches + 6); 3404 3405 printk(KERN_ERR "\nB. Corruption after free\n"); 3406 p = kzalloc(128, GFP_KERNEL); 3407 kfree(p); 3408 *p = 0x78; 3409 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3410 validate_slab_cache(kmalloc_caches + 7); 3411 3412 p = kzalloc(256, GFP_KERNEL); 3413 kfree(p); 3414 p[50] = 0x9a; 3415 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3416 p); 3417 validate_slab_cache(kmalloc_caches + 8); 3418 3419 p = kzalloc(512, GFP_KERNEL); 3420 kfree(p); 3421 p[512] = 0xab; 3422 printk(KERN_ERR "\n3. 
kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3423 validate_slab_cache(kmalloc_caches + 9); 3424} 3425#else 3426static void resiliency_test(void) {}; 3427#endif 3428 3429/* 3430 * Generate lists of code addresses where slabcache objects are allocated 3431 * and freed. 3432 */ 3433 3434struct location { 3435 unsigned long count; 3436 void *addr; 3437 long long sum_time; 3438 long min_time; 3439 long max_time; 3440 long min_pid; 3441 long max_pid; 3442 cpumask_t cpus; 3443 nodemask_t nodes; 3444}; 3445 3446struct loc_track { 3447 unsigned long max; 3448 unsigned long count; 3449 struct location *loc; 3450}; 3451 3452static void free_loc_track(struct loc_track *t) 3453{ 3454 if (t->max) 3455 free_pages((unsigned long)t->loc, 3456 get_order(sizeof(struct location) * t->max)); 3457} 3458 3459static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3460{ 3461 struct location *l; 3462 int order; 3463 3464 order = get_order(sizeof(struct location) * max); 3465 3466 l = (void *)__get_free_pages(flags, order); 3467 if (!l) 3468 return 0; 3469 3470 if (t->count) { 3471 memcpy(l, t->loc, sizeof(struct location) * t->count); 3472 free_loc_track(t); 3473 } 3474 t->max = max; 3475 t->loc = l; 3476 return 1; 3477} 3478 3479static int add_location(struct loc_track *t, struct kmem_cache *s, 3480 const struct track *track) 3481{ 3482 long start, end, pos; 3483 struct location *l; 3484 void *caddr; 3485 unsigned long age = jiffies - track->when; 3486 3487 start = -1; 3488 end = t->count; 3489 3490 for ( ; ; ) { 3491 pos = start + (end - start + 1) / 2; 3492 3493 /* 3494 * There is nothing at "end". If we end up there 3495 * we need to add something to before end. 3496 */ 3497 if (pos == end) 3498 break; 3499 3500 caddr = t->loc[pos].addr; 3501 if (track->addr == caddr) { 3502 3503 l = &t->loc[pos]; 3504 l->count++; 3505 if (track->when) { 3506 l->sum_time += age; 3507 if (age < l->min_time) 3508 l->min_time = age; 3509 if (age > l->max_time) 3510 l->max_time = age; 3511 3512 if (track->pid < l->min_pid) 3513 l->min_pid = track->pid; 3514 if (track->pid > l->max_pid) 3515 l->max_pid = track->pid; 3516 3517 cpu_set(track->cpu, l->cpus); 3518 } 3519 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3520 return 1; 3521 } 3522 3523 if (track->addr < caddr) 3524 end = pos; 3525 else 3526 start = pos; 3527 } 3528 3529 /* 3530 * Not found. Insert new tracking element. 
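 *
 * The loop above is a binary search over t->loc[], which is kept sorted
 * by caller address: "start" stays below the insertion point and "end"
 * at or above it, so once pos == end the new entry belongs at exactly
 * that index; the memmove() below opens the gap for it.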
3531 */ 3532 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3533 return 0; 3534 3535 l = t->loc + pos; 3536 if (pos < t->count) 3537 memmove(l + 1, l, 3538 (t->count - pos) * sizeof(struct location)); 3539 t->count++; 3540 l->count = 1; 3541 l->addr = track->addr; 3542 l->sum_time = age; 3543 l->min_time = age; 3544 l->max_time = age; 3545 l->min_pid = track->pid; 3546 l->max_pid = track->pid; 3547 cpus_clear(l->cpus); 3548 cpu_set(track->cpu, l->cpus); 3549 nodes_clear(l->nodes); 3550 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3551 return 1; 3552} 3553 3554static void process_slab(struct loc_track *t, struct kmem_cache *s, 3555 struct page *page, enum track_item alloc) 3556{ 3557 void *addr = page_address(page); 3558 DECLARE_BITMAP(map, page->objects); 3559 void *p; 3560 3561 bitmap_zero(map, page->objects); 3562 for_each_free_object(p, s, page->freelist) 3563 set_bit(slab_index(p, s, addr), map); 3564 3565 for_each_object(p, s, addr, page->objects) 3566 if (!test_bit(slab_index(p, s, addr), map)) 3567 add_location(t, s, get_track(s, p, alloc)); 3568} 3569 3570static int list_locations(struct kmem_cache *s, char *buf, 3571 enum track_item alloc) 3572{ 3573 int len = 0; 3574 unsigned long i; 3575 struct loc_track t = { 0, 0, NULL }; 3576 int node; 3577 3578 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3579 GFP_TEMPORARY)) 3580 return sprintf(buf, "Out of memory\n"); 3581 3582 /* Push back cpu slabs */ 3583 flush_all(s); 3584 3585 for_each_node_state(node, N_NORMAL_MEMORY) { 3586 struct kmem_cache_node *n = get_node(s, node); 3587 unsigned long flags; 3588 struct page *page; 3589 3590 if (!atomic_long_read(&n->nr_slabs)) 3591 continue; 3592 3593 spin_lock_irqsave(&n->list_lock, flags); 3594 list_for_each_entry(page, &n->partial, lru) 3595 process_slab(&t, s, page, alloc); 3596 list_for_each_entry(page, &n->full, lru) 3597 process_slab(&t, s, page, alloc); 3598 spin_unlock_irqrestore(&n->list_lock, flags); 3599 } 3600 3601 for (i = 0; i < t.count; i++) { 3602 struct location *l = &t.loc[i]; 3603 3604 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 3605 break; 3606 len += sprintf(buf + len, "%7ld ", l->count); 3607 3608 if (l->addr) 3609 len += sprint_symbol(buf + len, (unsigned long)l->addr); 3610 else 3611 len += sprintf(buf + len, "<not-available>"); 3612 3613 if (l->sum_time != l->min_time) { 3614 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3615 l->min_time, 3616 (long)div_u64(l->sum_time, l->count), 3617 l->max_time); 3618 } else 3619 len += sprintf(buf + len, " age=%ld", 3620 l->min_time); 3621 3622 if (l->min_pid != l->max_pid) 3623 len += sprintf(buf + len, " pid=%ld-%ld", 3624 l->min_pid, l->max_pid); 3625 else 3626 len += sprintf(buf + len, " pid=%ld", 3627 l->min_pid); 3628 3629 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && 3630 len < PAGE_SIZE - 60) { 3631 len += sprintf(buf + len, " cpus="); 3632 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3633 l->cpus); 3634 } 3635 3636 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && 3637 len < PAGE_SIZE - 60) { 3638 len += sprintf(buf + len, " nodes="); 3639 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3640 l->nodes); 3641 } 3642 3643 len += sprintf(buf + len, "\n"); 3644 } 3645 3646 free_loc_track(&t); 3647 if (!t.count) 3648 len += sprintf(buf, "No data\n"); 3649 return len; 3650} 3651 3652enum slab_stat_type { 3653 SL_ALL, /* All slabs */ 3654 SL_PARTIAL, /* Only partially allocated slabs */ 3655 SL_CPU, /* Only slabs used for cpu caches */ 3656 
SL_OBJECTS, /* Determine allocated objects not slabs */ 3657 SL_TOTAL /* Determine object capacity not slabs */ 3658}; 3659 3660#define SO_ALL (1 << SL_ALL) 3661#define SO_PARTIAL (1 << SL_PARTIAL) 3662#define SO_CPU (1 << SL_CPU) 3663#define SO_OBJECTS (1 << SL_OBJECTS) 3664#define SO_TOTAL (1 << SL_TOTAL) 3665 3666static ssize_t show_slab_objects(struct kmem_cache *s, 3667 char *buf, unsigned long flags) 3668{ 3669 unsigned long total = 0; 3670 int node; 3671 int x; 3672 unsigned long *nodes; 3673 unsigned long *per_cpu; 3674 3675 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3676 if (!nodes) 3677 return -ENOMEM; 3678 per_cpu = nodes + nr_node_ids; 3679 3680 if (flags & SO_CPU) { 3681 int cpu; 3682 3683 for_each_possible_cpu(cpu) { 3684 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3685 3686 if (!c || c->node < 0) 3687 continue; 3688 3689 if (c->page) { 3690 if (flags & SO_TOTAL) 3691 x = c->page->objects; 3692 else if (flags & SO_OBJECTS) 3693 x = c->page->inuse; 3694 else 3695 x = 1; 3696 3697 total += x; 3698 nodes[c->node] += x; 3699 } 3700 per_cpu[c->node]++; 3701 } 3702 } 3703 3704 if (flags & SO_ALL) { 3705 for_each_node_state(node, N_NORMAL_MEMORY) { 3706 struct kmem_cache_node *n = get_node(s, node); 3707 3708 if (flags & SO_TOTAL) 3709 x = atomic_long_read(&n->total_objects); 3710 else if (flags & SO_OBJECTS) 3711 x = atomic_long_read(&n->total_objects) - 3712 count_partial(n, count_free); 3713 3714 else 3715 x = atomic_long_read(&n->nr_slabs); 3716 total += x; 3717 nodes[node] += x; 3718 } 3719 3720 } else if (flags & SO_PARTIAL) { 3721 for_each_node_state(node, N_NORMAL_MEMORY) { 3722 struct kmem_cache_node *n = get_node(s, node); 3723 3724 if (flags & SO_TOTAL) 3725 x = count_partial(n, count_total); 3726 else if (flags & SO_OBJECTS) 3727 x = count_partial(n, count_inuse); 3728 else 3729 x = n->nr_partial; 3730 total += x; 3731 nodes[node] += x; 3732 } 3733 } 3734 x = sprintf(buf, "%lu", total); 3735#ifdef CONFIG_NUMA 3736 for_each_node_state(node, N_NORMAL_MEMORY) 3737 if (nodes[node]) 3738 x += sprintf(buf + x, " N%d=%lu", 3739 node, nodes[node]); 3740#endif 3741 kfree(nodes); 3742 return x + sprintf(buf + x, "\n"); 3743} 3744 3745static int any_slab_objects(struct kmem_cache *s) 3746{ 3747 int node; 3748 3749 for_each_online_node(node) { 3750 struct kmem_cache_node *n = get_node(s, node); 3751 3752 if (!n) 3753 continue; 3754 3755 if (atomic_long_read(&n->total_objects)) 3756 return 1; 3757 } 3758 return 0; 3759} 3760 3761#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3762#define to_slab(n) container_of(n, struct kmem_cache, kobj); 3763 3764struct slab_attribute { 3765 struct attribute attr; 3766 ssize_t (*show)(struct kmem_cache *s, char *buf); 3767 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3768}; 3769 3770#define SLAB_ATTR_RO(_name) \ 3771 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3772 3773#define SLAB_ATTR(_name) \ 3774 static struct slab_attribute _name##_attr = \ 3775 __ATTR(_name, 0644, _name##_show, _name##_store) 3776 3777static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3778{ 3779 return sprintf(buf, "%d\n", s->size); 3780} 3781SLAB_ATTR_RO(slab_size); 3782 3783static ssize_t align_show(struct kmem_cache *s, char *buf) 3784{ 3785 return sprintf(buf, "%d\n", s->align); 3786} 3787SLAB_ATTR_RO(align); 3788 3789static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3790{ 3791 return sprintf(buf, "%d\n", s->objsize); 3792} 
3793SLAB_ATTR_RO(object_size); 3794 3795static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3796{ 3797 return sprintf(buf, "%d\n", oo_objects(s->oo)); 3798} 3799SLAB_ATTR_RO(objs_per_slab); 3800 3801static ssize_t order_store(struct kmem_cache *s, 3802 const char *buf, size_t length) 3803{ 3804 unsigned long order; 3805 int err; 3806 3807 err = strict_strtoul(buf, 10, &order); 3808 if (err) 3809 return err; 3810 3811 if (order > slub_max_order || order < slub_min_order) 3812 return -EINVAL; 3813 3814 calculate_sizes(s, order); 3815 return length; 3816} 3817 3818static ssize_t order_show(struct kmem_cache *s, char *buf) 3819{ 3820 return sprintf(buf, "%d\n", oo_order(s->oo)); 3821} 3822SLAB_ATTR(order); 3823 3824static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3825{ 3826 if (s->ctor) { 3827 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3828 3829 return n + sprintf(buf + n, "\n"); 3830 } 3831 return 0; 3832} 3833SLAB_ATTR_RO(ctor); 3834 3835static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3836{ 3837 return sprintf(buf, "%d\n", s->refcount - 1); 3838} 3839SLAB_ATTR_RO(aliases); 3840 3841static ssize_t slabs_show(struct kmem_cache *s, char *buf) 3842{ 3843 return show_slab_objects(s, buf, SO_ALL); 3844} 3845SLAB_ATTR_RO(slabs); 3846 3847static ssize_t partial_show(struct kmem_cache *s, char *buf) 3848{ 3849 return show_slab_objects(s, buf, SO_PARTIAL); 3850} 3851SLAB_ATTR_RO(partial); 3852 3853static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3854{ 3855 return show_slab_objects(s, buf, SO_CPU); 3856} 3857SLAB_ATTR_RO(cpu_slabs); 3858 3859static ssize_t objects_show(struct kmem_cache *s, char *buf) 3860{ 3861 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 3862} 3863SLAB_ATTR_RO(objects); 3864 3865static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 3866{ 3867 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 3868} 3869SLAB_ATTR_RO(objects_partial); 3870 3871static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 3872{ 3873 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 3874} 3875SLAB_ATTR_RO(total_objects); 3876 3877static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 3878{ 3879 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 3880} 3881 3882static ssize_t sanity_checks_store(struct kmem_cache *s, 3883 const char *buf, size_t length) 3884{ 3885 s->flags &= ~SLAB_DEBUG_FREE; 3886 if (buf[0] == '1') 3887 s->flags |= SLAB_DEBUG_FREE; 3888 return length; 3889} 3890SLAB_ATTR(sanity_checks); 3891 3892static ssize_t trace_show(struct kmem_cache *s, char *buf) 3893{ 3894 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 3895} 3896 3897static ssize_t trace_store(struct kmem_cache *s, const char *buf, 3898 size_t length) 3899{ 3900 s->flags &= ~SLAB_TRACE; 3901 if (buf[0] == '1') 3902 s->flags |= SLAB_TRACE; 3903 return length; 3904} 3905SLAB_ATTR(trace); 3906 3907static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 3908{ 3909 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 3910} 3911 3912static ssize_t reclaim_account_store(struct kmem_cache *s, 3913 const char *buf, size_t length) 3914{ 3915 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 3916 if (buf[0] == '1') 3917 s->flags |= SLAB_RECLAIM_ACCOUNT; 3918 return length; 3919} 3920SLAB_ATTR(reclaim_account); 3921 3922static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 3923{ 3924 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 3925} 3926SLAB_ATTR_RO(hwcache_align); 3927 
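/*
 * Usage note (illustrative, not derived from this file alone): every
 * attribute wired into slab_attrs[] below shows up as a file under
 * /sys/kernel/slab/<cache>/ once sysfs_slab_add() has run, e.g.
 *
 *	cat /sys/kernel/slab/kmalloc-192/order
 *	echo 1 > /sys/kernel/slab/kmalloc-192/trace
 *
 * The *_store() helpers treat a leading '1' as "enable" and anything
 * else as "disable", and several of them refuse to change layout
 * related flags while objects are allocated (-EBUSY).
 */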
3928#ifdef CONFIG_ZONE_DMA 3929static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 3930{ 3931 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 3932} 3933SLAB_ATTR_RO(cache_dma); 3934#endif 3935 3936static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 3937{ 3938 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 3939} 3940SLAB_ATTR_RO(destroy_by_rcu); 3941 3942static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 3943{ 3944 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 3945} 3946 3947static ssize_t red_zone_store(struct kmem_cache *s, 3948 const char *buf, size_t length) 3949{ 3950 if (any_slab_objects(s)) 3951 return -EBUSY; 3952 3953 s->flags &= ~SLAB_RED_ZONE; 3954 if (buf[0] == '1') 3955 s->flags |= SLAB_RED_ZONE; 3956 calculate_sizes(s, -1); 3957 return length; 3958} 3959SLAB_ATTR(red_zone); 3960 3961static ssize_t poison_show(struct kmem_cache *s, char *buf) 3962{ 3963 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 3964} 3965 3966static ssize_t poison_store(struct kmem_cache *s, 3967 const char *buf, size_t length) 3968{ 3969 if (any_slab_objects(s)) 3970 return -EBUSY; 3971 3972 s->flags &= ~SLAB_POISON; 3973 if (buf[0] == '1') 3974 s->flags |= SLAB_POISON; 3975 calculate_sizes(s, -1); 3976 return length; 3977} 3978SLAB_ATTR(poison); 3979 3980static ssize_t store_user_show(struct kmem_cache *s, char *buf) 3981{ 3982 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 3983} 3984 3985static ssize_t store_user_store(struct kmem_cache *s, 3986 const char *buf, size_t length) 3987{ 3988 if (any_slab_objects(s)) 3989 return -EBUSY; 3990 3991 s->flags &= ~SLAB_STORE_USER; 3992 if (buf[0] == '1') 3993 s->flags |= SLAB_STORE_USER; 3994 calculate_sizes(s, -1); 3995 return length; 3996} 3997SLAB_ATTR(store_user); 3998 3999static ssize_t validate_show(struct kmem_cache *s, char *buf) 4000{ 4001 return 0; 4002} 4003 4004static ssize_t validate_store(struct kmem_cache *s, 4005 const char *buf, size_t length) 4006{ 4007 int ret = -EINVAL; 4008 4009 if (buf[0] == '1') { 4010 ret = validate_slab_cache(s); 4011 if (ret >= 0) 4012 ret = length; 4013 } 4014 return ret; 4015} 4016SLAB_ATTR(validate); 4017 4018static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4019{ 4020 return 0; 4021} 4022 4023static ssize_t shrink_store(struct kmem_cache *s, 4024 const char *buf, size_t length) 4025{ 4026 if (buf[0] == '1') { 4027 int rc = kmem_cache_shrink(s); 4028 4029 if (rc) 4030 return rc; 4031 } else 4032 return -EINVAL; 4033 return length; 4034} 4035SLAB_ATTR(shrink); 4036 4037static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4038{ 4039 if (!(s->flags & SLAB_STORE_USER)) 4040 return -ENOSYS; 4041 return list_locations(s, buf, TRACK_ALLOC); 4042} 4043SLAB_ATTR_RO(alloc_calls); 4044 4045static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4046{ 4047 if (!(s->flags & SLAB_STORE_USER)) 4048 return -ENOSYS; 4049 return list_locations(s, buf, TRACK_FREE); 4050} 4051SLAB_ATTR_RO(free_calls); 4052 4053#ifdef CONFIG_NUMA 4054static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4055{ 4056 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4057} 4058 4059static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4060 const char *buf, size_t length) 4061{ 4062 unsigned long ratio; 4063 int err; 4064 4065 err = strict_strtoul(buf, 10, &ratio); 4066 if (err) 4067 return err; 4068 4069 if (ratio <= 100) 4070 s->remote_node_defrag_ratio = ratio * 10; 4071 
4072 return length; 4073} 4074SLAB_ATTR(remote_node_defrag_ratio); 4075#endif 4076 4077#ifdef CONFIG_SLUB_STATS 4078static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4079{ 4080 unsigned long sum = 0; 4081 int cpu; 4082 int len; 4083 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4084 4085 if (!data) 4086 return -ENOMEM; 4087 4088 for_each_online_cpu(cpu) { 4089 unsigned x = get_cpu_slab(s, cpu)->stat[si]; 4090 4091 data[cpu] = x; 4092 sum += x; 4093 } 4094 4095 len = sprintf(buf, "%lu", sum); 4096 4097#ifdef CONFIG_SMP 4098 for_each_online_cpu(cpu) { 4099 if (data[cpu] && len < PAGE_SIZE - 20) 4100 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4101 } 4102#endif 4103 kfree(data); 4104 return len + sprintf(buf + len, "\n"); 4105} 4106 4107#define STAT_ATTR(si, text) \ 4108static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4109{ \ 4110 return show_stat(s, buf, si); \ 4111} \ 4112SLAB_ATTR_RO(text); \ 4113 4114STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4115STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4116STAT_ATTR(FREE_FASTPATH, free_fastpath); 4117STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4118STAT_ATTR(FREE_FROZEN, free_frozen); 4119STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4120STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4121STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4122STAT_ATTR(ALLOC_SLAB, alloc_slab); 4123STAT_ATTR(ALLOC_REFILL, alloc_refill); 4124STAT_ATTR(FREE_SLAB, free_slab); 4125STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 4126STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 4127STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 4128STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4129STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4130STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4131STAT_ATTR(ORDER_FALLBACK, order_fallback); 4132#endif 4133 4134static struct attribute *slab_attrs[] = { 4135 &slab_size_attr.attr, 4136 &object_size_attr.attr, 4137 &objs_per_slab_attr.attr, 4138 &order_attr.attr, 4139 &objects_attr.attr, 4140 &objects_partial_attr.attr, 4141 &total_objects_attr.attr, 4142 &slabs_attr.attr, 4143 &partial_attr.attr, 4144 &cpu_slabs_attr.attr, 4145 &ctor_attr.attr, 4146 &aliases_attr.attr, 4147 &align_attr.attr, 4148 &sanity_checks_attr.attr, 4149 &trace_attr.attr, 4150 &hwcache_align_attr.attr, 4151 &reclaim_account_attr.attr, 4152 &destroy_by_rcu_attr.attr, 4153 &red_zone_attr.attr, 4154 &poison_attr.attr, 4155 &store_user_attr.attr, 4156 &validate_attr.attr, 4157 &shrink_attr.attr, 4158 &alloc_calls_attr.attr, 4159 &free_calls_attr.attr, 4160#ifdef CONFIG_ZONE_DMA 4161 &cache_dma_attr.attr, 4162#endif 4163#ifdef CONFIG_NUMA 4164 &remote_node_defrag_ratio_attr.attr, 4165#endif 4166#ifdef CONFIG_SLUB_STATS 4167 &alloc_fastpath_attr.attr, 4168 &alloc_slowpath_attr.attr, 4169 &free_fastpath_attr.attr, 4170 &free_slowpath_attr.attr, 4171 &free_frozen_attr.attr, 4172 &free_add_partial_attr.attr, 4173 &free_remove_partial_attr.attr, 4174 &alloc_from_partial_attr.attr, 4175 &alloc_slab_attr.attr, 4176 &alloc_refill_attr.attr, 4177 &free_slab_attr.attr, 4178 &cpuslab_flush_attr.attr, 4179 &deactivate_full_attr.attr, 4180 &deactivate_empty_attr.attr, 4181 &deactivate_to_head_attr.attr, 4182 &deactivate_to_tail_attr.attr, 4183 &deactivate_remote_frees_attr.attr, 4184 &order_fallback_attr.attr, 4185#endif 4186 NULL 4187}; 4188 4189static struct attribute_group slab_attr_group = { 4190 .attrs = slab_attrs, 4191}; 4192 4193static ssize_t slab_attr_show(struct kobject *kobj, 4194 struct attribute *attr, 
4195 char *buf) 4196{ 4197 struct slab_attribute *attribute; 4198 struct kmem_cache *s; 4199 int err; 4200 4201 attribute = to_slab_attr(attr); 4202 s = to_slab(kobj); 4203 4204 if (!attribute->show) 4205 return -EIO; 4206 4207 err = attribute->show(s, buf); 4208 4209 return err; 4210} 4211 4212static ssize_t slab_attr_store(struct kobject *kobj, 4213 struct attribute *attr, 4214 const char *buf, size_t len) 4215{ 4216 struct slab_attribute *attribute; 4217 struct kmem_cache *s; 4218 int err; 4219 4220 attribute = to_slab_attr(attr); 4221 s = to_slab(kobj); 4222 4223 if (!attribute->store) 4224 return -EIO; 4225 4226 err = attribute->store(s, buf, len); 4227 4228 return err; 4229} 4230 4231static void kmem_cache_release(struct kobject *kobj) 4232{ 4233 struct kmem_cache *s = to_slab(kobj); 4234 4235 kfree(s); 4236} 4237 4238static struct sysfs_ops slab_sysfs_ops = { 4239 .show = slab_attr_show, 4240 .store = slab_attr_store, 4241}; 4242 4243static struct kobj_type slab_ktype = { 4244 .sysfs_ops = &slab_sysfs_ops, 4245 .release = kmem_cache_release 4246}; 4247 4248static int uevent_filter(struct kset *kset, struct kobject *kobj) 4249{ 4250 struct kobj_type *ktype = get_ktype(kobj); 4251 4252 if (ktype == &slab_ktype) 4253 return 1; 4254 return 0; 4255} 4256 4257static struct kset_uevent_ops slab_uevent_ops = { 4258 .filter = uevent_filter, 4259}; 4260 4261static struct kset *slab_kset; 4262 4263#define ID_STR_LENGTH 64 4264 4265/* Create a unique string id for a slab cache: 4266 * 4267 * Format :[flags-]size 4268 */ 4269static char *create_unique_id(struct kmem_cache *s) 4270{ 4271 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 4272 char *p = name; 4273 4274 BUG_ON(!name); 4275 4276 *p++ = ':'; 4277 /* 4278 * First flags affecting slabcache operations. We will only 4279 * get here for aliasable slabs so we do not need to support 4280 * too many flags. The flags here must cover all flags that 4281 * are matched during merging to guarantee that the id is 4282 * unique. 4283 */ 4284 if (s->flags & SLAB_CACHE_DMA) 4285 *p++ = 'd'; 4286 if (s->flags & SLAB_RECLAIM_ACCOUNT) 4287 *p++ = 'a'; 4288 if (s->flags & SLAB_DEBUG_FREE) 4289 *p++ = 'F'; 4290 if (p != name + 1) 4291 *p++ = '-'; 4292 p += sprintf(p, "%07d", s->size); 4293 BUG_ON(p > name + ID_STR_LENGTH - 1); 4294 return name; 4295} 4296 4297static int sysfs_slab_add(struct kmem_cache *s) 4298{ 4299 int err; 4300 const char *name; 4301 int unmergeable; 4302 4303 if (slab_state < SYSFS) 4304 /* Defer until later */ 4305 return 0; 4306 4307 unmergeable = slab_unmergeable(s); 4308 if (unmergeable) { 4309 /* 4310 * Slabcache can never be merged so we can use the name proper. 4311 * This is typically the case for debug situations. In that 4312 * case we can catch duplicate names easily. 4313 */ 4314 sysfs_remove_link(&slab_kset->kobj, s->name); 4315 name = s->name; 4316 } else { 4317 /* 4318 * Create a unique name for the slab as a target 4319 * for the symlinks. 
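 *
 * For example (value is illustrative): a mergeable SLAB_CACHE_DMA cache
 * with s->size == 192 gets an id like ":d-0000192" from
 * create_unique_id(); the human readable cache names then become
 * symlinks to that directory via sysfs_slab_alias().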
4320 */ 4321 name = create_unique_id(s); 4322 } 4323 4324 s->kobj.kset = slab_kset; 4325 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name); 4326 if (err) { 4327 kobject_put(&s->kobj); 4328 return err; 4329 } 4330 4331 err = sysfs_create_group(&s->kobj, &slab_attr_group); 4332 if (err) 4333 return err; 4334 kobject_uevent(&s->kobj, KOBJ_ADD); 4335 if (!unmergeable) { 4336 /* Setup first alias */ 4337 sysfs_slab_alias(s, s->name); 4338 kfree(name); 4339 } 4340 return 0; 4341} 4342 4343static void sysfs_slab_remove(struct kmem_cache *s) 4344{ 4345 kobject_uevent(&s->kobj, KOBJ_REMOVE); 4346 kobject_del(&s->kobj); 4347 kobject_put(&s->kobj); 4348} 4349 4350/* 4351 * Need to buffer aliases during bootup until sysfs becomes 4352 * available lest we loose that information. 4353 */ 4354struct saved_alias { 4355 struct kmem_cache *s; 4356 const char *name; 4357 struct saved_alias *next; 4358}; 4359 4360static struct saved_alias *alias_list; 4361 4362static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 4363{ 4364 struct saved_alias *al; 4365 4366 if (slab_state == SYSFS) { 4367 /* 4368 * If we have a leftover link then remove it. 4369 */ 4370 sysfs_remove_link(&slab_kset->kobj, name); 4371 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 4372 } 4373 4374 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 4375 if (!al) 4376 return -ENOMEM; 4377 4378 al->s = s; 4379 al->name = name; 4380 al->next = alias_list; 4381 alias_list = al; 4382 return 0; 4383} 4384 4385static int __init slab_sysfs_init(void) 4386{ 4387 struct kmem_cache *s; 4388 int err; 4389 4390 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj); 4391 if (!slab_kset) { 4392 printk(KERN_ERR "Cannot register slab subsystem.\n"); 4393 return -ENOSYS; 4394 } 4395 4396 slab_state = SYSFS; 4397 4398 list_for_each_entry(s, &slab_caches, list) { 4399 err = sysfs_slab_add(s); 4400 if (err) 4401 printk(KERN_ERR "SLUB: Unable to add boot slab %s" 4402 " to sysfs\n", s->name); 4403 } 4404 4405 while (alias_list) { 4406 struct saved_alias *al = alias_list; 4407 4408 alias_list = alias_list->next; 4409 err = sysfs_slab_alias(al->s, al->name); 4410 if (err) 4411 printk(KERN_ERR "SLUB: Unable to add boot slab alias" 4412 " %s to sysfs\n", s->name); 4413 kfree(al); 4414 } 4415 4416 resiliency_test(); 4417 return 0; 4418} 4419 4420__initcall(slab_sysfs_init); 4421#endif 4422 4423/* 4424 * The /proc/slabinfo ABI 4425 */ 4426#ifdef CONFIG_SLABINFO 4427static void print_slabinfo_header(struct seq_file *m) 4428{ 4429 seq_puts(m, "slabinfo - version: 2.1\n"); 4430 seq_puts(m, "# name <active_objs> <num_objs> <objsize> " 4431 "<objperslab> <pagesperslab>"); 4432 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 4433 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 4434 seq_putc(m, '\n'); 4435} 4436 4437static void *s_start(struct seq_file *m, loff_t *pos) 4438{ 4439 loff_t n = *pos; 4440 4441 down_read(&slub_lock); 4442 if (!n) 4443 print_slabinfo_header(m); 4444 4445 return seq_list_start(&slab_caches, *pos); 4446} 4447 4448static void *s_next(struct seq_file *m, void *p, loff_t *pos) 4449{ 4450 return seq_list_next(p, &slab_caches, pos); 4451} 4452 4453static void s_stop(struct seq_file *m, void *p) 4454{ 4455 up_read(&slub_lock); 4456} 4457 4458static int s_show(struct seq_file *m, void *p) 4459{ 4460 unsigned long nr_partials = 0; 4461 unsigned long nr_slabs = 0; 4462 unsigned long nr_inuse = 0; 4463 unsigned long nr_objs = 0; 4464 unsigned long nr_free = 0; 4465 

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IWUSR | S_IRUGO, NULL,
			&proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */