slub.c revision 5595cffc8248e4672c5803547445e85e4053c8fc
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added to or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. E.g.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup are
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */
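
/*
 * A sketch of the canonical inverted-order nesting described above, as
 * implemented by get_partial_node() and lock_and_freeze_slab() further
 * down: the list_lock is taken first and the slab lock is only tried,
 * never waited on, to avoid bouncing cachelines:
 *
 *	spin_lock(&n->list_lock);
 *	list_for_each_entry(page, &n->partial, lru)
 *		if (slab_trylock(page))
 *			... use the slab ...
 *	spin_unlock(&n->list_lock);
 */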

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG 1
#else
#define SLABDEBUG 0
#endif

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
	DOWN,		/* No slab functionality available */
	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
	UP,		/* Everything works but does not show up in sysfs */
	SYSFS		/* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking of the user of a slab object.
 */
struct track {
	void *addr;		/* Called from address */
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SLUB_DEBUG
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
	kfree(s);
}

#endif

static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	c->stat[si]++;
#endif
}

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
	return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
	return s->node[node];
#else
	return &s->local_node;
#endif
}

static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
{
#ifdef CONFIG_SMP
	return s->cpu_slab[cpu];
#else
	return &s->cpu_slab;
#endif
}

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}
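
/*
 * For example (a sketch with made-up numbers): with s->size == 64 and
 * base == 0x1000, the valid object addresses are 0x1000, 0x1040,
 * 0x1080, ... A pointer such as 0x1043 fails the modulo test above
 * and is rejected as corrupt.
 */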

/*
 * Slow version of get and set free pointer.
 *
 * This version requires touching the cache lines of kmem_cache which
 * we avoid doing in the fast alloc/free paths. There we obtain the
 * offset from the kmem_cache_cpu structure instead.
 */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
			__p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
	for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
						unsigned long size)
{
	struct kmem_cache_order_objects x = {
		(order << 16) + (PAGE_SIZE << order) / size
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> 16;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & ((1 << 16) - 1);
}
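
/*
 * A worked example of the order/objects encoding above (a sketch with
 * assumed numbers): for a 4K page size, oo_make(1, 256) packs order 1
 * into the upper 16 bits and 8192 / 256 == 32 objects into the lower
 * 16 bits, so x.x == (1 << 16) + 32. oo_order() and oo_objects()
 * simply unpack the two halves again. This 16 bit split is also why
 * object counts elsewhere in this file are capped at 65535.
 */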

#ifdef CONFIG_SLUB_DEBUG
/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
	int i, offset;
	int newline = 1;
	char ascii[17];

	ascii[16] = 0;

	for (i = 0; i < length; i++) {
		if (newline) {
			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
			newline = 0;
		}
		printk(KERN_CONT " %02x", addr[i]);
		offset = i % 16;
		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
		if (offset == 15) {
			printk(KERN_CONT " %s\n", ascii);
			newline = 1;
		}
	}
	if (!newline) {
		i %= 16;
		while (i < 16) {
			printk(KERN_CONT "   ");
			ascii[i] = ' ';
			i++;
		}
		printk(KERN_CONT " %s\n", ascii);
	}
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, void *addr)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	p += alloc;
	if (addr) {
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, NULL);
	set_track(s, object, TRACK_ALLOC, NULL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
		s, t->addr, jiffies - t->when, t->cpu, t->pid);
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
		page, page->objects, page->inuse, page->freelist, page->flags);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "========================================"
			"=====================================\n");
	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
	printk(KERN_ERR "----------------------------------------"
			"-------------------------------------\n\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
			p, p - addr, get_freepointer(s, p));

	if (p > addr + 16)
		print_section("Bytes b4", p - 16, 16);

	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));

	if (s->flags & SLAB_RED_ZONE)
		print_section("Redzone", p + s->objsize,
			s->inuse - s->objsize);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (off != s->size)
		/* Beginning of the filler is the free pointer */
		print_section("Padding", p + off, s->size - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, int active)
{
	u8 *p = object;

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->objsize - 1);
		p[s->objsize - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->objsize,
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
			s->inuse - s->objsize);
}

static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
	while (bytes) {
		if (*start != (u8)value)
			return start;
		start++;
		bytes--;
	}
	return NULL;
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	fault = check_bytes(start, value, bytes);
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 *	Bytes of the object to be managed.
 *	If the freepointer may overlay the object then the free
 *	pointer is the first word of the object.
 *
 *	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *	0xa5 (POISON_END)
 *
 * object + s->objsize
 *	Padding to reach word boundary. This is also used for Redzoning.
 *	Padding is extended by another word if Redzoning is enabled and
 *	objsize == inuse.
 *
 *	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *	Meta data starts here.
 *
 *	A. Free pointer (if we cannot overwrite object on free)
 *	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 *		one word if debugging is on to be able to detect writes
 *		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
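
/*
 * A worked example of the layout above (a sketch; offsets assume a
 * 64-bit build where sizeof(void *) == 8 and sizeof(struct track) == 24):
 * for a cache with objsize 24 and SLAB_RED_ZONE | SLAB_STORE_USER set,
 * calculate_sizes() below produces
 *
 *	bytes  0..23	object (the free pointer overlays byte 0 when free)
 *	bytes 24..31	red zone word (so inuse == 32)
 *	bytes 32..79	two struct track entries (alloc and free)
 *	bytes 80..87	padding word catching writes before the next object
 *
 * giving s->size == 88.
 */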

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page));
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, int active)
{
	u8 *p = object;
	u8 *endobject = object + s->objsize;

	if (s->flags & SLAB_RED_ZONE) {
		unsigned int red =
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, red, s->inuse - s->objsize))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->objsize);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (!active && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->objsize - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->objsize - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && active)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp = page->freelist;
	void *object = NULL;
	unsigned long max_objects;

	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
	if (max_objects > 65535)
		max_objects = 65535;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but "
			"should be %d", page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"counted were %d", page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	list_add(&page->lru, &n->full);
	spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	spin_unlock(&n->list_lock);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (!NUMA_BUILD || n) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, 0);
	init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto bad;

	if (!on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already allocated");
		goto bad;
	}

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		goto bad;
	}

	if (!check_object(s, page, object, 0))
		goto bad;

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, 1);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static int free_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto fail;

	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		goto fail;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		goto fail;
	}

	if (!check_object(s, page, object, 1))
		return 0;

	if (unlikely(s != page->slab)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) "
				"outside of slab", object);
		} else if (!page->slab) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
						object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		goto fail;
	}

	/* Special debug activities for freeing objects */
	if (!PageSlubFrozen(page) && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, 0);
	return 1;

fail:
	slab_fix(s, "Object at 0x%p not freed", object);
	return 0;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_DEBUG_FREE;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		default:
			printk(KERN_ERR "slub_debug option '%c' "
				"unknown. skipped\n", *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);
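
/*
 * Examples for the boot parameter parsed above (a sketch derived from
 * the option letters handled in the switch statement):
 *
 *	slub_debug		full debugging for all slabs
 *	slub_debug=FZ		sanity checks and red zoning for all slabs
 *	slub_debug=-		switch all debugging off
 *	slub_debug=,dentry	full debugging for the dentry cache only
 *	slub_debug=P,kmalloc-	poisoning for all caches whose name starts
 *				with "kmalloc-" (the name is matched by
 *				prefix via strncmp() in kmem_cache_flags())
 */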

static unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs ||
	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
		flags |= slub_debug;

	return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
#endif

/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(gfp_t flags, int node,
					struct kmem_cache_order_objects oo)
{
	int order = oo_order(oo);

	if (node == -1)
		return alloc_pages(flags, order);
	else
		return alloc_pages_node(node, flags, order);
}

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;

	flags |= s->allocflags;

	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
									oo);
	if (unlikely(!page)) {
		oo = s->min;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(flags, node, oo);
		if (!page)
			return NULL;

		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
	}
	page->objects = oo_objects(oo);
	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << oo_order(oo));

	return page;
}

static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	if (unlikely(s->ctor))
		s->ctor(object);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	void *start;
	void *last;
	void *p;

	BUG_ON(flags & GFP_SLAB_BUG_MASK);

	page = allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
	if (!page)
		goto out;

	inc_slabs_node(s, page_to_nid(page), page->objects);
	page->slab = s;
	page->flags |= 1 << PG_slab;
	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
			SLAB_STORE_USER | SLAB_TRACE))
		__SetPageSlubDebug(page);

	start = page_address(page);

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));

	last = start;
	for_each_object(p, s, start, page->objects) {
		setup_object(s, page, last);
		set_freepointer(s, last, p);
		last = p;
	}
	setup_object(s, page, last);
	set_freepointer(s, last, NULL);

	page->freelist = start;
	page->inuse = 0;
out:
	return page;
}
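
/*
 * After the loop above the freelist is a simple address-ordered chain
 * through the new page. For example (a sketch with assumed numbers),
 * with three objects of size 64 starting at base: page->freelist == base,
 * the free pointer word at base + s->offset points to base + 64,
 * base + 64 points to base + 128, and base + 128 is the last object,
 * whose free pointer is set to NULL.
 */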

static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int order = compound_order(page);
	int pages = 1 << order;

	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page),
						page->objects)
			check_object(s, page, p, 0);
		__ClearPageSlubDebug(page);
	}

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-pages);

	__ClearPageSlab(page);
	reset_page_mapcount(page);
	__free_pages(page, order);
}

static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page;

	page = container_of((struct list_head *)h, struct page, lru);
	__free_slab(page->slab, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
		/*
		 * RCU free overloads the RCU head over the LRU
		 */
		struct rcu_head *head = (void *)&page->lru;

		call_rcu(head, rcu_free_slab);
	} else
		__free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
	dec_slabs_node(s, page_to_nid(page), page->objects);
	free_slab(s, page);
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	__bit_spin_unlock(PG_locked, &page->flags);
}

static __always_inline int slab_trylock(struct page *page)
{
	int rc = 1;

	rc = bit_spin_trylock(PG_locked, &page->flags);
	return rc;
}

/*
 * Management of partially allocated slabs
 */
static void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	if (tail)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}

static void remove_partial(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	n->nr_partial--;
	spin_unlock(&n->list_lock);
}

/*
 * Lock slab and remove from the partial list.
 *
 * Must hold list_lock.
 */
static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
							struct page *page)
{
	if (slab_trylock(page)) {
		list_del(&page->lru);
		n->nr_partial--;
		__SetPageSlubFrozen(page);
		return 1;
	}
	return 0;
}

/*
 * Try to allocate a partial slab from a specific node.
 */
static struct page *get_partial_node(struct kmem_cache_node *n)
{
	struct page *page;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partial_node()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry(page, &n->partial, lru)
		if (lock_and_freeze_slab(n, page))
			goto out;
	page = NULL;
out:
	spin_unlock(&n->list_lock);
	return page;
}

/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
	struct page *page;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
	 * defrag_ratio = 1000) then every (well almost) allocation will
	 * first attempt to defrag slab caches on other nodes. This means
	 * scanning over all nodes to look for partial slabs which may be
	 * expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return NULL;

	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		struct kmem_cache_node *n;

		n = get_node(s, zone_to_nid(zone));

		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
				n->nr_partial > n->min_partial) {
			page = get_partial_node(n);
			if (page)
				return page;
		}
	}
#endif
	return NULL;
}

/*
 * Get a partial page, lock it and return it.
 */
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	int searchnode = (node == -1) ? numa_node_id() : node;

	page = get_partial_node(get_node(s, searchnode));
	if (page || (flags & __GFP_THISNODE))
		return page;

	return get_any_partial(s, flags);
}

/*
 * Move a page back to the lists.
 *
 * Must be called with the slab lock held.
 *
 * On exit the slab lock will have been dropped.
 */
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

	__ClearPageSlubFrozen(page);
	if (page->inuse) {

		if (page->freelist) {
			add_partial(n, page, tail);
			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
		} else {
			stat(c, DEACTIVATE_FULL);
			if (SLABDEBUG && PageSlubDebug(page) &&
					(s->flags & SLAB_STORE_USER))
				add_full(n, page);
		}
		slab_unlock(page);
	} else {
		stat(c, DEACTIVATE_EMPTY);
		if (n->nr_partial < n->min_partial) {
			/*
			 * Adding an empty slab to the partial slabs in order
			 * to avoid page allocator overhead. This slab needs
			 * to come after the other slabs with objects in
			 * so that the others get filled first. That way the
			 * size of the partial list stays small.
			 *
			 * kmem_cache_shrink can reclaim any empty slabs from
			 * the partial list.
			 */
			add_partial(n, page, 1);
			slab_unlock(page);
		} else {
			slab_unlock(page);
			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
			discard_slab(s, page);
		}
	}
}

/*
 * Remove the cpu slab
 */
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	struct page *page = c->page;
	int tail = 1;

	if (page->freelist)
		stat(c, DEACTIVATE_REMOTE_FREES);
	/*
	 * Merge cpu freelist into slab freelist. Typically we get here
	 * because both freelists are empty. So this is unlikely
	 * to occur.
	 */
	while (unlikely(c->freelist)) {
		void **object;

		tail = 0;	/* Hot objects. Put the slab first */

		/* Retrieve object from cpu_freelist */
		object = c->freelist;
		c->freelist = c->freelist[c->offset];

		/* And put onto the regular freelist */
		object[c->offset] = page->freelist;
		page->freelist = object;
		page->inuse--;
	}
	c->page = NULL;
	unfreeze_slab(s, page, tail);
}

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	stat(c, CPUSLAB_FLUSH);
	slab_lock(c->page);
	deactivate_slab(s, c);
}

/*
 * Flush cpu slab.
 *
 * Called from IPI handler with interrupts disabled.
 */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

	if (likely(c && c->page))
		flush_slab(s, c);
}

static void flush_cpu_slab(void *d)
{
	struct kmem_cache *s = d;

	__flush_cpu_slab(s, smp_processor_id());
}

static void flush_all(struct kmem_cache *s)
{
	on_each_cpu(flush_cpu_slab, s, 1);
}

/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
	if (node != -1 && c->node != node)
		return 0;
#endif
	return 1;
}
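
/*
 * Note on the c->freelist[c->offset] indexing used above and in the
 * fastpaths below: c->freelist is a void ** and c->offset is s->offset
 * scaled to word units (see init_kmem_cache_cpu()), so object[c->offset]
 * is the free pointer word of the object. This is equivalent to
 * get_freepointer()/set_freepointer() but avoids touching the cache
 * lines of the kmem_cache structure in the hot paths.
 */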

/*
 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
 *
 * Interrupts are disabled.
 *
 * Processing is still very fast if new objects have been freed to the
 * regular freelist. In that case we simply take over the regular freelist
 * as the lockless freelist and zap the regular freelist.
 *
 * If that is not working then we fall back to the partial lists. We take the
 * first element of the freelist as the object to allocate now and move the
 * rest of the freelist to the lockless freelist.
 *
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since it involves
 * a call to the page allocator and the setup of a new slab.
 */
static void *__slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
{
	void **object;
	struct page *new;

	/* We handle __GFP_ZERO in the caller */
	gfpflags &= ~__GFP_ZERO;

	if (!c->page)
		goto new_slab;

	slab_lock(c->page);
	if (unlikely(!node_match(c, node)))
		goto another_slab;

	stat(c, ALLOC_REFILL);

load_freelist:
	object = c->page->freelist;
	if (unlikely(!object))
		goto another_slab;
	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
		goto debug;

	c->freelist = object[c->offset];
	c->page->inuse = c->page->objects;
	c->page->freelist = NULL;
	c->node = page_to_nid(c->page);
unlock_out:
	slab_unlock(c->page);
	stat(c, ALLOC_SLOWPATH);
	return object;

another_slab:
	deactivate_slab(s, c);

new_slab:
	new = get_partial(s, gfpflags, node);
	if (new) {
		c->page = new;
		stat(c, ALLOC_FROM_PARTIAL);
		goto load_freelist;
	}

	if (gfpflags & __GFP_WAIT)
		local_irq_enable();

	new = new_slab(s, gfpflags, node);

	if (gfpflags & __GFP_WAIT)
		local_irq_disable();

	if (new) {
		c = get_cpu_slab(s, smp_processor_id());
		stat(c, ALLOC_SLAB);
		if (c->page)
			flush_slab(s, c);
		slab_lock(new);
		__SetPageSlubFrozen(new);
		c->page = new;
		goto load_freelist;
	}
	return NULL;
debug:
	if (!alloc_debug_processing(s, c->page, object, addr))
		goto another_slab;

	c->page->inuse++;
	c->page->freelist = object[c->offset];
	c->node = -1;
	goto unlock_out;
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, void *addr)
{
	void **object;
	struct kmem_cache_cpu *c;
	unsigned long flags;
	unsigned int objsize;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	objsize = c->objsize;
	if (unlikely(!c->freelist || !node_match(c, node)))
		object = __slab_alloc(s, gfpflags, node, addr, c);
	else {
		object = c->freelist;
		c->freelist = object[c->offset];
		stat(c, ALLOC_FASTPATH);
	}
	local_irq_restore(flags);

	if (unlikely((gfpflags & __GFP_ZERO) && object))
		memset(object, 0, objsize);

	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
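
/*
 * Typical use of the interface exported above (a sketch; "struct foo"
 * and the cache name are made up for illustration):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *					0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 */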

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
				void *x, void *addr, unsigned int offset)
{
	void *prior;
	void **object = (void *)x;
	struct kmem_cache_cpu *c;

	c = get_cpu_slab(s, raw_smp_processor_id());
	stat(c, FREE_SLOWPATH);
	slab_lock(page);

	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
		goto debug;

checks_ok:
	prior = object[offset] = page->freelist;
	page->freelist = object;
	page->inuse--;

	if (unlikely(PageSlubFrozen(page))) {
		stat(c, FREE_FROZEN);
		goto out_unlock;
	}

	if (unlikely(!page->inuse))
		goto slab_empty;

	/*
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
	if (unlikely(!prior)) {
		add_partial(get_node(s, page_to_nid(page)), page, 1);
		stat(c, FREE_ADD_PARTIAL);
	}

out_unlock:
	slab_unlock(page);
	return;

slab_empty:
	if (prior) {
		/*
		 * Slab still on the partial list.
		 */
		remove_partial(s, page);
		stat(c, FREE_REMOVE_PARTIAL);
	}
	slab_unlock(page);
	stat(c, FREE_SLAB);
	discard_slab(s, page);
	return;

debug:
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	goto checks_ok;
}

/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If the fastpath is not possible then fall back to __slab_free where we
 * deal with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
			struct page *page, void *x, void *addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	debug_check_no_locks_freed(object, c->objsize);
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(object, s->objsize);
	if (likely(page == c->page && c->node >= 0)) {
		object[c->offset] = c->freelist;
		c->freelist = object;
		stat(c, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr, c->offset);

	local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page *page;

	page = virt_to_head_page(x);

	slab_free(s, page, x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_free);

/* Figure out on which slab page the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;

	return page;
}

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor always has one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	if ((PAGE_SIZE << min_order) / size > 65535)
		return get_order(size * 65535) - 1;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;

	}

	return order;
}
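
/*
 * A worked example (a sketch assuming PAGE_SIZE == 4096 and
 * slub_min_order == 0): slab_order(700, 8, 3, 16) starts at
 * order = fls(8 * 700 - 1) - PAGE_SHIFT = 1. An order 1 slab has
 * 8192 bytes, fits the 8 * 700 = 5600 byte minimum and leaves
 * 8192 % 700 = 492 bytes unused, which is within the acceptable
 * waste of 8192 / 16 = 512 bytes, so order 1 is returned.
 * calculate_order() below drives this with progressively more
 * permissive fractions and object counts until an order fits.
 */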

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
	 *
	 * First we reduce the acceptable waste in a slab. Then
	 * we reduce the minimum objects required in a slab.
	 */
	min_objects = slub_min_objects;
	if (!min_objects)
		min_objects = 4 * (fls(nr_cpu_ids) + 1);
	while (min_objects > 1) {
		fraction = 16;
		while (fraction >= 4) {
			order = slab_order(size, min_objects,
						slub_max_order, fraction);
			if (order <= slub_max_order)
				return order;
			fraction /= 2;
		}
		min_objects /= 2;
	}

	/*
	 * We were unable to place multiple objects in a slab. Now
	 * let's see if we can place a single object there.
	 */
	order = slab_order(size, 1, slub_max_order, 1);
	if (order <= slub_max_order)
		return order;

	/*
	 * Doh this slab cannot be placed using slub_max_order.
	 */
	order = slab_order(size, 1, MAX_ORDER, 1);
	if (order <= MAX_ORDER)
		return order;
	return -ENOSYS;
}

/*
 * Figure out what the alignment of the objects will be.
 */
static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

static void init_kmem_cache_cpu(struct kmem_cache *s,
			struct kmem_cache_cpu *c)
{
	c->page = NULL;
	c->freelist = NULL;
	c->node = 0;
	c->offset = s->offset / sizeof(void *);
	c->objsize = s->objsize;
#ifdef CONFIG_SLUB_STATS
	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
#endif
}

static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
	n->nr_partial = 0;

	/*
	 * The larger the object size is, the more pages we want on the partial
	 * list to avoid pounding the page allocator excessively.
	 */
	n->min_partial = ilog2(s->size);
	if (n->min_partial < MIN_PARTIAL)
		n->min_partial = MIN_PARTIAL;
	else if (n->min_partial > MAX_PARTIAL)
		n->min_partial = MAX_PARTIAL;

	spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_set(&n->nr_slabs, 0);
	INIT_LIST_HEAD(&n->full);
#endif
}
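
/*
 * For example (arithmetic per the clamp above): a cache with
 * s->size == 32 gets min_partial = ilog2(32) = 5, which already meets
 * MIN_PARTIAL, while s->size == 4096 gives ilog2(4096) = 12, clamped
 * down to MAX_PARTIAL = 10 partial slabs kept per node.
 */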

#ifdef CONFIG_SMP
/*
 * Per cpu array for per cpu structures.
 *
 * The per cpu array places all kmem_cache_cpu structures from one processor
 * close together meaning that it becomes possible that multiple per cpu
 * structures are contained in one cacheline. This may be particularly
 * beneficial for the kmalloc caches.
 *
 * A desktop system typically has around 60-80 slabs. With 100 here we are
 * likely able to get per cpu structures for all caches from the array defined
 * here. We must be able to cover all kmalloc caches during bootstrap.
 *
 * If the per cpu array is exhausted then fall back to kmalloc
 * of individual cachelines. No sharing is possible then.
 */
#define NR_KMEM_CACHE_CPU 100

static DEFINE_PER_CPU(struct kmem_cache_cpu,
				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];

static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;

static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
							int cpu, gfp_t flags)
{
	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);

	if (c)
		per_cpu(kmem_cache_cpu_free, cpu) =
				(void *)c->freelist;
	else {
		/* Table overflow: So allocate ourselves */
		c = kmalloc_node(
			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
			flags, cpu_to_node(cpu));
		if (!c)
			return NULL;
	}

	init_kmem_cache_cpu(s, c);
	return c;
}

static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
{
	if (c < per_cpu(kmem_cache_cpu, cpu) ||
			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
		kfree(c);
		return;
	}
	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
	per_cpu(kmem_cache_cpu_free, cpu) = c;
}

static void free_kmem_cache_cpus(struct kmem_cache *s)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (c) {
			s->cpu_slab[cpu] = NULL;
			free_kmem_cache_cpu(c, cpu);
		}
	}
}

static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (c)
			continue;

		c = alloc_kmem_cache_cpu(s, cpu, flags);
		if (!c) {
			free_kmem_cache_cpus(s);
			return 0;
		}
		s->cpu_slab[cpu] = c;
	}
	return 1;
}

/*
 * Initialize the per cpu array.
 */
static void init_alloc_cpu_cpu(int cpu)
{
	int i;

	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
		return;

	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);

	cpu_set(cpu, kmem_cach_cpu_free_init_once);
}

static void __init init_alloc_cpu(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		init_alloc_cpu_cpu(cpu);
}

#else
static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
static inline void init_alloc_cpu(void) {}

static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
	init_kmem_cache_cpu(s, &s->cpu_slab);
	return 1;
}
#endif

#ifdef CONFIG_NUMA
/*
 * No kmalloc_node yet so do it by hand. We know that this is the first
 * slab on the node for this slabcache. There are no concurrent accesses
 * possible.
 *
 * Note that this function only works on the kmalloc_node_cache
 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
 * memory on a fresh node that has no slab structures yet.
 */
2073 */ 2074static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, 2075 int node) 2076{ 2077 struct page *page; 2078 struct kmem_cache_node *n; 2079 unsigned long flags; 2080 2081 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); 2082 2083 page = new_slab(kmalloc_caches, gfpflags, node); 2084 2085 BUG_ON(!page); 2086 if (page_to_nid(page) != node) { 2087 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2088 "node %d\n", node); 2089 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2090 "in order to be able to continue\n"); 2091 } 2092 2093 n = page->freelist; 2094 BUG_ON(!n); 2095 page->freelist = get_freepointer(kmalloc_caches, n); 2096 page->inuse++; 2097 kmalloc_caches->node[node] = n; 2098#ifdef CONFIG_SLUB_DEBUG 2099 init_object(kmalloc_caches, n, 1); 2100 init_tracking(kmalloc_caches, n); 2101#endif 2102 init_kmem_cache_node(n, kmalloc_caches); 2103 inc_slabs_node(kmalloc_caches, node, page->objects); 2104 2105 /* 2106 * lockdep requires consistent irq usage for each lock 2107 * so even though there cannot be a race this early in 2108 * the boot sequence, we still disable irqs. 2109 */ 2110 local_irq_save(flags); 2111 add_partial(n, page, 0); 2112 local_irq_restore(flags); 2113 return n; 2114} 2115 2116static void free_kmem_cache_nodes(struct kmem_cache *s) 2117{ 2118 int node; 2119 2120 for_each_node_state(node, N_NORMAL_MEMORY) { 2121 struct kmem_cache_node *n = s->node[node]; 2122 if (n && n != &s->local_node) 2123 kmem_cache_free(kmalloc_caches, n); 2124 s->node[node] = NULL; 2125 } 2126} 2127 2128static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2129{ 2130 int node; 2131 int local_node; 2132 2133 if (slab_state >= UP) 2134 local_node = page_to_nid(virt_to_page(s)); 2135 else 2136 local_node = 0; 2137 2138 for_each_node_state(node, N_NORMAL_MEMORY) { 2139 struct kmem_cache_node *n; 2140 2141 if (local_node == node) 2142 n = &s->local_node; 2143 else { 2144 if (slab_state == DOWN) { 2145 n = early_kmem_cache_node_alloc(gfpflags, 2146 node); 2147 continue; 2148 } 2149 n = kmem_cache_alloc_node(kmalloc_caches, 2150 gfpflags, node); 2151 2152 if (!n) { 2153 free_kmem_cache_nodes(s); 2154 return 0; 2155 } 2156 2157 } 2158 s->node[node] = n; 2159 init_kmem_cache_node(n, s); 2160 } 2161 return 1; 2162} 2163#else 2164static void free_kmem_cache_nodes(struct kmem_cache *s) 2165{ 2166} 2167 2168static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2169{ 2170 init_kmem_cache_node(&s->local_node, s); 2171 return 1; 2172} 2173#endif 2174 2175/* 2176 * calculate_sizes() determines the order and the distribution of data within 2177 * a slab object. 2178 */ 2179static int calculate_sizes(struct kmem_cache *s, int forced_order) 2180{ 2181 unsigned long flags = s->flags; 2182 unsigned long size = s->objsize; 2183 unsigned long align = s->align; 2184 int order; 2185 2186 /* 2187 * Round up object size to the next word boundary. We can only 2188 * place the free pointer at word boundaries and this determines 2189 * the possible location of the free pointer. 2190 */ 2191 size = ALIGN(size, sizeof(void *)); 2192 2193#ifdef CONFIG_SLUB_DEBUG 2194 /* 2195 * Determine if we can poison the object itself. If the user of 2196 * the slab may touch the object after free or before allocation 2197 * then we should never poison the object itself. 
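 *
 * This is why the check below skips poisoning for SLAB_DESTROY_BY_RCU
 * caches (objects may still be read after kmem_cache_free() until the
 * RCU grace period ends) and for caches with a constructor (the
 * ctor-initialized state must survive free/alloc cycles).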
2198 */
2199	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2200			!s->ctor)
2201		s->flags |= __OBJECT_POISON;
2202	else
2203		s->flags &= ~__OBJECT_POISON;
2204
2205
2206	/*
2207	 * If we are Redzoning then check if there is some space between the
2208	 * end of the object and the free pointer. If not then add an
2209	 * additional word to have some bytes to store Redzone information.
2210	 */
2211	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2212		size += sizeof(void *);
2213#endif
2214
2215	/*
2216	 * With that we have determined the number of bytes in actual use
2217	 * by the object. This is the potential offset to the free pointer.
2218	 */
2219	s->inuse = size;
2220
2221	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2222		s->ctor)) {
2223		/*
2224		 * Relocate free pointer after the object if it is not
2225		 * permitted to overwrite the first word of the object on
2226		 * kmem_cache_free.
2227		 *
2228		 * This is the case if we do RCU, have a constructor or
2229		 * are poisoning the objects.
2230		 */
2231		s->offset = size;
2232		size += sizeof(void *);
2233	}
2234
2235#ifdef CONFIG_SLUB_DEBUG
2236	if (flags & SLAB_STORE_USER)
2237		/*
2238		 * Need to store information about allocs and frees after
2239		 * the object.
2240		 */
2241		size += 2 * sizeof(struct track);
2242
2243	if (flags & SLAB_RED_ZONE)
2244		/*
2245		 * Add some empty padding so that we can catch
2246		 * overwrites from earlier objects rather than let
2247		 * tracking information or the free pointer be
2248		 * corrupted if a user writes before the start
2249		 * of the object.
2250		 */
2251		size += sizeof(void *);
2252#endif
2253
2254	/*
2255	 * Determine the alignment based on various parameters that the
2256	 * user specified and the dynamic determination of cache line size
2257	 * on bootup.
2258	 */
2259	align = calculate_alignment(flags, align, s->objsize);
2260
2261	/*
2262	 * SLUB stores one object immediately after another beginning from
2263	 * offset 0. In order to align the objects we simply have to size
2264	 * each object to conform to the alignment.
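 *
 * Worked example (illustrative numbers, 64 bit, ignoring the debug
 * fields above): an objsize of 52 was rounded up to 56 by the initial
 * word alignment; with align == 8, ALIGN(56, 8) is still 56, so
 * objects are placed at offsets 0, 56, 112, ... within the slab.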
2265 */ 2266 size = ALIGN(size, align); 2267 s->size = size; 2268 if (forced_order >= 0) 2269 order = forced_order; 2270 else 2271 order = calculate_order(size); 2272 2273 if (order < 0) 2274 return 0; 2275 2276 s->allocflags = 0; 2277 if (order) 2278 s->allocflags |= __GFP_COMP; 2279 2280 if (s->flags & SLAB_CACHE_DMA) 2281 s->allocflags |= SLUB_DMA; 2282 2283 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2284 s->allocflags |= __GFP_RECLAIMABLE; 2285 2286 /* 2287 * Determine the number of objects per slab 2288 */ 2289 s->oo = oo_make(order, size); 2290 s->min = oo_make(get_order(size), size); 2291 if (oo_objects(s->oo) > oo_objects(s->max)) 2292 s->max = s->oo; 2293 2294 return !!oo_objects(s->oo); 2295 2296} 2297 2298static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 2299 const char *name, size_t size, 2300 size_t align, unsigned long flags, 2301 void (*ctor)(void *)) 2302{ 2303 memset(s, 0, kmem_size); 2304 s->name = name; 2305 s->ctor = ctor; 2306 s->objsize = size; 2307 s->align = align; 2308 s->flags = kmem_cache_flags(size, flags, name, ctor); 2309 2310 if (!calculate_sizes(s, -1)) 2311 goto error; 2312 2313 s->refcount = 1; 2314#ifdef CONFIG_NUMA 2315 s->remote_node_defrag_ratio = 100; 2316#endif 2317 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) 2318 goto error; 2319 2320 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) 2321 return 1; 2322 free_kmem_cache_nodes(s); 2323error: 2324 if (flags & SLAB_PANIC) 2325 panic("Cannot create slab %s size=%lu realsize=%u " 2326 "order=%u offset=%u flags=%lx\n", 2327 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2328 s->offset, flags); 2329 return 0; 2330} 2331 2332/* 2333 * Check if a given pointer is valid 2334 */ 2335int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2336{ 2337 struct page *page; 2338 2339 page = get_object_page(object); 2340 2341 if (!page || s != page->slab) 2342 /* No slab or wrong slab */ 2343 return 0; 2344 2345 if (!check_valid_pointer(s, page, object)) 2346 return 0; 2347 2348 /* 2349 * We could also check if the object is on the slabs freelist. 2350 * But this would be too expensive and it seems that the main 2351 * purpose of kmem_ptr_valid() is to check if the object belongs 2352 * to a certain slab. 2353 */ 2354 return 1; 2355} 2356EXPORT_SYMBOL(kmem_ptr_validate); 2357 2358/* 2359 * Determine the size of a slab object 2360 */ 2361unsigned int kmem_cache_size(struct kmem_cache *s) 2362{ 2363 return s->objsize; 2364} 2365EXPORT_SYMBOL(kmem_cache_size); 2366 2367const char *kmem_cache_name(struct kmem_cache *s) 2368{ 2369 return s->name; 2370} 2371EXPORT_SYMBOL(kmem_cache_name); 2372 2373static void list_slab_objects(struct kmem_cache *s, struct page *page, 2374 const char *text) 2375{ 2376#ifdef CONFIG_SLUB_DEBUG 2377 void *addr = page_address(page); 2378 void *p; 2379 DECLARE_BITMAP(map, page->objects); 2380 2381 bitmap_zero(map, page->objects); 2382 slab_err(s, page, "%s", text); 2383 slab_lock(page); 2384 for_each_free_object(p, s, page->freelist) 2385 set_bit(slab_index(p, s, addr), map); 2386 2387 for_each_object(p, s, addr, page->objects) { 2388 2389 if (!test_bit(slab_index(p, s, addr), map)) { 2390 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2391 p, p - addr); 2392 print_tracking(s, p); 2393 } 2394 } 2395 slab_unlock(page); 2396#endif 2397} 2398 2399/* 2400 * Attempt to free all partial slabs on a node. 
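 *
 * Empty slabs are unlinked and discarded; slabs that still hold
 * objects are left on the list and reported via list_slab_objects().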
2401 */ 2402static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2403{ 2404 unsigned long flags; 2405 struct page *page, *h; 2406 2407 spin_lock_irqsave(&n->list_lock, flags); 2408 list_for_each_entry_safe(page, h, &n->partial, lru) { 2409 if (!page->inuse) { 2410 list_del(&page->lru); 2411 discard_slab(s, page); 2412 n->nr_partial--; 2413 } else { 2414 list_slab_objects(s, page, 2415 "Objects remaining on kmem_cache_close()"); 2416 } 2417 } 2418 spin_unlock_irqrestore(&n->list_lock, flags); 2419} 2420 2421/* 2422 * Release all resources used by a slab cache. 2423 */ 2424static inline int kmem_cache_close(struct kmem_cache *s) 2425{ 2426 int node; 2427 2428 flush_all(s); 2429 2430 /* Attempt to free all objects */ 2431 free_kmem_cache_cpus(s); 2432 for_each_node_state(node, N_NORMAL_MEMORY) { 2433 struct kmem_cache_node *n = get_node(s, node); 2434 2435 free_partial(s, n); 2436 if (n->nr_partial || slabs_node(s, node)) 2437 return 1; 2438 } 2439 free_kmem_cache_nodes(s); 2440 return 0; 2441} 2442 2443/* 2444 * Close a cache and release the kmem_cache structure 2445 * (must be used for caches created using kmem_cache_create) 2446 */ 2447void kmem_cache_destroy(struct kmem_cache *s) 2448{ 2449 down_write(&slub_lock); 2450 s->refcount--; 2451 if (!s->refcount) { 2452 list_del(&s->list); 2453 up_write(&slub_lock); 2454 if (kmem_cache_close(s)) { 2455 printk(KERN_ERR "SLUB %s: %s called for cache that " 2456 "still has objects.\n", s->name, __func__); 2457 dump_stack(); 2458 } 2459 sysfs_slab_remove(s); 2460 } else 2461 up_write(&slub_lock); 2462} 2463EXPORT_SYMBOL(kmem_cache_destroy); 2464 2465/******************************************************************** 2466 * Kmalloc subsystem 2467 *******************************************************************/ 2468 2469struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned; 2470EXPORT_SYMBOL(kmalloc_caches); 2471 2472static int __init setup_slub_min_order(char *str) 2473{ 2474 get_option(&str, &slub_min_order); 2475 2476 return 1; 2477} 2478 2479__setup("slub_min_order=", setup_slub_min_order); 2480 2481static int __init setup_slub_max_order(char *str) 2482{ 2483 get_option(&str, &slub_max_order); 2484 2485 return 1; 2486} 2487 2488__setup("slub_max_order=", setup_slub_max_order); 2489 2490static int __init setup_slub_min_objects(char *str) 2491{ 2492 get_option(&str, &slub_min_objects); 2493 2494 return 1; 2495} 2496 2497__setup("slub_min_objects=", setup_slub_min_objects); 2498 2499static int __init setup_slub_nomerge(char *str) 2500{ 2501 slub_nomerge = 1; 2502 return 1; 2503} 2504 2505__setup("slub_nomerge", setup_slub_nomerge); 2506 2507static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2508 const char *name, int size, gfp_t gfp_flags) 2509{ 2510 unsigned int flags = 0; 2511 2512 if (gfp_flags & SLUB_DMA) 2513 flags = SLAB_CACHE_DMA; 2514 2515 down_write(&slub_lock); 2516 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2517 flags, NULL)) 2518 goto panic; 2519 2520 list_add(&s->list, &slab_caches); 2521 up_write(&slub_lock); 2522 if (sysfs_slab_add(s)) 2523 goto panic; 2524 return s; 2525 2526panic: 2527 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2528} 2529 2530#ifdef CONFIG_ZONE_DMA 2531static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1]; 2532 2533static void sysfs_add_func(struct work_struct *w) 2534{ 2535 struct kmem_cache *s; 2536 2537 down_write(&slub_lock); 2538 list_for_each_entry(s, &slab_caches, list) { 2539 if (s->flags & 
__SYSFS_ADD_DEFERRED) { 2540 s->flags &= ~__SYSFS_ADD_DEFERRED; 2541 sysfs_slab_add(s); 2542 } 2543 } 2544 up_write(&slub_lock); 2545} 2546 2547static DECLARE_WORK(sysfs_add_work, sysfs_add_func); 2548 2549static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) 2550{ 2551 struct kmem_cache *s; 2552 char *text; 2553 size_t realsize; 2554 2555 s = kmalloc_caches_dma[index]; 2556 if (s) 2557 return s; 2558 2559 /* Dynamically create dma cache */ 2560 if (flags & __GFP_WAIT) 2561 down_write(&slub_lock); 2562 else { 2563 if (!down_write_trylock(&slub_lock)) 2564 goto out; 2565 } 2566 2567 if (kmalloc_caches_dma[index]) 2568 goto unlock_out; 2569 2570 realsize = kmalloc_caches[index].objsize; 2571 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", 2572 (unsigned int)realsize); 2573 s = kmalloc(kmem_size, flags & ~SLUB_DMA); 2574 2575 if (!s || !text || !kmem_cache_open(s, flags, text, 2576 realsize, ARCH_KMALLOC_MINALIGN, 2577 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) { 2578 kfree(s); 2579 kfree(text); 2580 goto unlock_out; 2581 } 2582 2583 list_add(&s->list, &slab_caches); 2584 kmalloc_caches_dma[index] = s; 2585 2586 schedule_work(&sysfs_add_work); 2587 2588unlock_out: 2589 up_write(&slub_lock); 2590out: 2591 return kmalloc_caches_dma[index]; 2592} 2593#endif 2594 2595/* 2596 * Conversion table for small slabs sizes / 8 to the index in the 2597 * kmalloc array. This is necessary for slabs < 192 since we have non power 2598 * of two cache sizes there. The size of larger slabs can be determined using 2599 * fls. 2600 */ 2601static s8 size_index[24] = { 2602 3, /* 8 */ 2603 4, /* 16 */ 2604 5, /* 24 */ 2605 5, /* 32 */ 2606 6, /* 40 */ 2607 6, /* 48 */ 2608 6, /* 56 */ 2609 6, /* 64 */ 2610 1, /* 72 */ 2611 1, /* 80 */ 2612 1, /* 88 */ 2613 1, /* 96 */ 2614 7, /* 104 */ 2615 7, /* 112 */ 2616 7, /* 120 */ 2617 7, /* 128 */ 2618 2, /* 136 */ 2619 2, /* 144 */ 2620 2, /* 152 */ 2621 2, /* 160 */ 2622 2, /* 168 */ 2623 2, /* 176 */ 2624 2, /* 184 */ 2625 2 /* 192 */ 2626}; 2627 2628static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2629{ 2630 int index; 2631 2632 if (size <= 192) { 2633 if (!size) 2634 return ZERO_SIZE_PTR; 2635 2636 index = size_index[(size - 1) / 8]; 2637 } else 2638 index = fls(size - 1); 2639 2640#ifdef CONFIG_ZONE_DMA 2641 if (unlikely((flags & SLUB_DMA))) 2642 return dma_kmalloc_cache(index, flags); 2643 2644#endif 2645 return &kmalloc_caches[index]; 2646} 2647 2648void *__kmalloc(size_t size, gfp_t flags) 2649{ 2650 struct kmem_cache *s; 2651 2652 if (unlikely(size > PAGE_SIZE)) 2653 return kmalloc_large(size, flags); 2654 2655 s = get_slab(size, flags); 2656 2657 if (unlikely(ZERO_OR_NULL_PTR(s))) 2658 return s; 2659 2660 return slab_alloc(s, flags, -1, __builtin_return_address(0)); 2661} 2662EXPORT_SYMBOL(__kmalloc); 2663 2664static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2665{ 2666 struct page *page = alloc_pages_node(node, flags | __GFP_COMP, 2667 get_order(size)); 2668 2669 if (page) 2670 return page_address(page); 2671 else 2672 return NULL; 2673} 2674 2675#ifdef CONFIG_NUMA 2676void *__kmalloc_node(size_t size, gfp_t flags, int node) 2677{ 2678 struct kmem_cache *s; 2679 2680 if (unlikely(size > PAGE_SIZE)) 2681 return kmalloc_large_node(size, flags, node); 2682 2683 s = get_slab(size, flags); 2684 2685 if (unlikely(ZERO_OR_NULL_PTR(s))) 2686 return s; 2687 2688 return slab_alloc(s, flags, node, __builtin_return_address(0)); 2689} 2690EXPORT_SYMBOL(__kmalloc_node); 2691#endif 2692 2693size_t ksize(const void 
*object) 2694{ 2695 struct page *page; 2696 struct kmem_cache *s; 2697 2698 if (unlikely(object == ZERO_SIZE_PTR)) 2699 return 0; 2700 2701 page = virt_to_head_page(object); 2702 2703 if (unlikely(!PageSlab(page))) { 2704 WARN_ON(!PageCompound(page)); 2705 return PAGE_SIZE << compound_order(page); 2706 } 2707 s = page->slab; 2708 2709#ifdef CONFIG_SLUB_DEBUG 2710 /* 2711 * Debugging requires use of the padding between object 2712 * and whatever may come after it. 2713 */ 2714 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2715 return s->objsize; 2716 2717#endif 2718 /* 2719 * If we have the need to store the freelist pointer 2720 * back there or track user information then we can 2721 * only use the space before that information. 2722 */ 2723 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2724 return s->inuse; 2725 /* 2726 * Else we can use all the padding etc for the allocation 2727 */ 2728 return s->size; 2729} 2730 2731void kfree(const void *x) 2732{ 2733 struct page *page; 2734 void *object = (void *)x; 2735 2736 if (unlikely(ZERO_OR_NULL_PTR(x))) 2737 return; 2738 2739 page = virt_to_head_page(x); 2740 if (unlikely(!PageSlab(page))) { 2741 BUG_ON(!PageCompound(page)); 2742 put_page(page); 2743 return; 2744 } 2745 slab_free(page->slab, page, object, __builtin_return_address(0)); 2746} 2747EXPORT_SYMBOL(kfree); 2748 2749/* 2750 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2751 * the remaining slabs by the number of items in use. The slabs with the 2752 * most items in use come first. New allocations will then fill those up 2753 * and thus they can be removed from the partial lists. 2754 * 2755 * The slabs with the least items are placed last. This results in them 2756 * being allocated from last increasing the chance that the last objects 2757 * are freed in them. 2758 */ 2759int kmem_cache_shrink(struct kmem_cache *s) 2760{ 2761 int node; 2762 int i; 2763 struct kmem_cache_node *n; 2764 struct page *page; 2765 struct page *t; 2766 int objects = oo_objects(s->max); 2767 struct list_head *slabs_by_inuse = 2768 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2769 unsigned long flags; 2770 2771 if (!slabs_by_inuse) 2772 return -ENOMEM; 2773 2774 flush_all(s); 2775 for_each_node_state(node, N_NORMAL_MEMORY) { 2776 n = get_node(s, node); 2777 2778 if (!n->nr_partial) 2779 continue; 2780 2781 for (i = 0; i < objects; i++) 2782 INIT_LIST_HEAD(slabs_by_inuse + i); 2783 2784 spin_lock_irqsave(&n->list_lock, flags); 2785 2786 /* 2787 * Build lists indexed by the items in use in each slab. 2788 * 2789 * Note that concurrent frees may occur while we hold the 2790 * list_lock. page->inuse here is the upper limit. 2791 */ 2792 list_for_each_entry_safe(page, t, &n->partial, lru) { 2793 if (!page->inuse && slab_trylock(page)) { 2794 /* 2795 * Must hold slab lock here because slab_free 2796 * may have freed the last object and be 2797 * waiting to release the slab. 2798 */ 2799 list_del(&page->lru); 2800 n->nr_partial--; 2801 slab_unlock(page); 2802 discard_slab(s, page); 2803 } else { 2804 list_move(&page->lru, 2805 slabs_by_inuse + page->inuse); 2806 } 2807 } 2808 2809 /* 2810 * Rebuild the partial list with the slabs filled up most 2811 * first and the least used slabs at the end. 
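 *
 * Illustration: with objects == 4, partial slabs with inuse counts of
 * 1, 3 and 2 land on slabs_by_inuse[1], [3] and [2]; splicing from
 * i = objects - 1 down to 0 below rebuilds the partial list in the
 * order 3, 2, 1.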
2812	 */
2813	for (i = objects - 1; i >= 0; i--)
2814		list_splice(slabs_by_inuse + i, n->partial.prev);
2815
2816	spin_unlock_irqrestore(&n->list_lock, flags);
2817	}
2818
2819	kfree(slabs_by_inuse);
2820	return 0;
2821}
2822EXPORT_SYMBOL(kmem_cache_shrink);
2823
2824#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2825static int slab_mem_going_offline_callback(void *arg)
2826{
2827	struct kmem_cache *s;
2828
2829	down_read(&slub_lock);
2830	list_for_each_entry(s, &slab_caches, list)
2831		kmem_cache_shrink(s);
2832	up_read(&slub_lock);
2833
2834	return 0;
2835}
2836
2837static void slab_mem_offline_callback(void *arg)
2838{
2839	struct kmem_cache_node *n;
2840	struct kmem_cache *s;
2841	struct memory_notify *marg = arg;
2842	int offline_node;
2843
2844	offline_node = marg->status_change_nid;
2845
2846	/*
2847	 * If the node still has available memory then we still need its
2848	 * kmem_cache_node, so there is nothing to tear down.
2849	 */
2850	if (offline_node < 0)
2851		return;
2852
2853	down_read(&slub_lock);
2854	list_for_each_entry(s, &slab_caches, list) {
2855		n = get_node(s, offline_node);
2856		if (n) {
2857			/*
2858			 * If n->nr_slabs > 0, slabs still exist on the node
2859			 * that is going down. We were unable to free them,
2860			 * and offline_pages() shouldn't have called this
2861			 * callback. So, we must fail.
2862			 */
2863			BUG_ON(slabs_node(s, offline_node));
2864
2865			s->node[offline_node] = NULL;
2866			kmem_cache_free(kmalloc_caches, n);
2867		}
2868	}
2869	up_read(&slub_lock);
2870}
2871
2872static int slab_mem_going_online_callback(void *arg)
2873{
2874	struct kmem_cache_node *n;
2875	struct kmem_cache *s;
2876	struct memory_notify *marg = arg;
2877	int nid = marg->status_change_nid;
2878	int ret = 0;
2879
2880	/*
2881	 * If the node's memory is already available, then kmem_cache_node is
2882	 * already created. Nothing to do.
2883	 */
2884	if (nid < 0)
2885		return 0;
2886
2887	/*
2888	 * We are bringing a node online. No memory is available yet. We must
2889	 * allocate a kmem_cache_node structure in order to bring the node
2890	 * online.
2891	 */
2892	down_read(&slub_lock);
2893	list_for_each_entry(s, &slab_caches, list) {
2894		/*
2895		 * XXX: kmem_cache_alloc_node will fall back to other nodes
2896		 * since memory is not yet available from the node that
2897		 * is brought up.
2898 */ 2899 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); 2900 if (!n) { 2901 ret = -ENOMEM; 2902 goto out; 2903 } 2904 init_kmem_cache_node(n, s); 2905 s->node[nid] = n; 2906 } 2907out: 2908 up_read(&slub_lock); 2909 return ret; 2910} 2911 2912static int slab_memory_callback(struct notifier_block *self, 2913 unsigned long action, void *arg) 2914{ 2915 int ret = 0; 2916 2917 switch (action) { 2918 case MEM_GOING_ONLINE: 2919 ret = slab_mem_going_online_callback(arg); 2920 break; 2921 case MEM_GOING_OFFLINE: 2922 ret = slab_mem_going_offline_callback(arg); 2923 break; 2924 case MEM_OFFLINE: 2925 case MEM_CANCEL_ONLINE: 2926 slab_mem_offline_callback(arg); 2927 break; 2928 case MEM_ONLINE: 2929 case MEM_CANCEL_OFFLINE: 2930 break; 2931 } 2932 2933 ret = notifier_from_errno(ret); 2934 return ret; 2935} 2936 2937#endif /* CONFIG_MEMORY_HOTPLUG */ 2938 2939/******************************************************************** 2940 * Basic setup of slabs 2941 *******************************************************************/ 2942 2943void __init kmem_cache_init(void) 2944{ 2945 int i; 2946 int caches = 0; 2947 2948 init_alloc_cpu(); 2949 2950#ifdef CONFIG_NUMA 2951 /* 2952 * Must first have the slab cache available for the allocations of the 2953 * struct kmem_cache_node's. There is special bootstrap code in 2954 * kmem_cache_open for slab_state == DOWN. 2955 */ 2956 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 2957 sizeof(struct kmem_cache_node), GFP_KERNEL); 2958 kmalloc_caches[0].refcount = -1; 2959 caches++; 2960 2961 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 2962#endif 2963 2964 /* Able to allocate the per node structures */ 2965 slab_state = PARTIAL; 2966 2967 /* Caches that are not of the two-to-the-power-of size */ 2968 if (KMALLOC_MIN_SIZE <= 64) { 2969 create_kmalloc_cache(&kmalloc_caches[1], 2970 "kmalloc-96", 96, GFP_KERNEL); 2971 caches++; 2972 create_kmalloc_cache(&kmalloc_caches[2], 2973 "kmalloc-192", 192, GFP_KERNEL); 2974 caches++; 2975 } 2976 2977 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) { 2978 create_kmalloc_cache(&kmalloc_caches[i], 2979 "kmalloc", 1 << i, GFP_KERNEL); 2980 caches++; 2981 } 2982 2983 2984 /* 2985 * Patch up the size_index table if we have strange large alignment 2986 * requirements for the kmalloc array. This is only the case for 2987 * MIPS it seems. The standard arches will not generate any code here. 2988 * 2989 * Largest permitted alignment is 256 bytes due to the way we 2990 * handle the index determination for the smaller caches. 2991 * 2992 * Make sure that nothing crazy happens if someone starts tinkering 2993 * around with ARCH_KMALLOC_MINALIGN 2994 */ 2995 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 2996 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 2997 2998 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) 2999 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW; 3000 3001 if (KMALLOC_MIN_SIZE == 128) { 3002 /* 3003 * The 192 byte sized cache is not used if the alignment 3004 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3005 * instead. 3006 */ 3007 for (i = 128 + 8; i <= 192; i += 8) 3008 size_index[(i - 1) / 8] = 8; 3009 } 3010 3011 slab_state = UP; 3012 3013 /* Provide the correct kmalloc names now that the caches are up */ 3014 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) 3015 kmalloc_caches[i]. 
name = 3016 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); 3017 3018#ifdef CONFIG_SMP 3019 register_cpu_notifier(&slab_notifier); 3020 kmem_size = offsetof(struct kmem_cache, cpu_slab) + 3021 nr_cpu_ids * sizeof(struct kmem_cache_cpu *); 3022#else 3023 kmem_size = sizeof(struct kmem_cache); 3024#endif 3025 3026 printk(KERN_INFO 3027 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3028 " CPUs=%d, Nodes=%d\n", 3029 caches, cache_line_size(), 3030 slub_min_order, slub_max_order, slub_min_objects, 3031 nr_cpu_ids, nr_node_ids); 3032} 3033 3034/* 3035 * Find a mergeable slab cache 3036 */ 3037static int slab_unmergeable(struct kmem_cache *s) 3038{ 3039 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3040 return 1; 3041 3042 if (s->ctor) 3043 return 1; 3044 3045 /* 3046 * We may have set a slab to be unmergeable during bootstrap. 3047 */ 3048 if (s->refcount < 0) 3049 return 1; 3050 3051 return 0; 3052} 3053 3054static struct kmem_cache *find_mergeable(size_t size, 3055 size_t align, unsigned long flags, const char *name, 3056 void (*ctor)(void *)) 3057{ 3058 struct kmem_cache *s; 3059 3060 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3061 return NULL; 3062 3063 if (ctor) 3064 return NULL; 3065 3066 size = ALIGN(size, sizeof(void *)); 3067 align = calculate_alignment(flags, align, size); 3068 size = ALIGN(size, align); 3069 flags = kmem_cache_flags(size, flags, name, NULL); 3070 3071 list_for_each_entry(s, &slab_caches, list) { 3072 if (slab_unmergeable(s)) 3073 continue; 3074 3075 if (size > s->size) 3076 continue; 3077 3078 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3079 continue; 3080 /* 3081 * Check if alignment is compatible. 3082 * Courtesy of Adrian Drzewiecki 3083 */ 3084 if ((s->size & ~(align - 1)) != s->size) 3085 continue; 3086 3087 if (s->size - size >= sizeof(void *)) 3088 continue; 3089 3090 return s; 3091 } 3092 return NULL; 3093} 3094 3095struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3096 size_t align, unsigned long flags, void (*ctor)(void *)) 3097{ 3098 struct kmem_cache *s; 3099 3100 down_write(&slub_lock); 3101 s = find_mergeable(size, align, flags, name, ctor); 3102 if (s) { 3103 int cpu; 3104 3105 s->refcount++; 3106 /* 3107 * Adjust the object sizes so that we clear 3108 * the complete object on kzalloc. 3109 */ 3110 s->objsize = max(s->objsize, (int)size); 3111 3112 /* 3113 * And then we need to update the object size in the 3114 * per cpu structures 3115 */ 3116 for_each_online_cpu(cpu) 3117 get_cpu_slab(s, cpu)->objsize = s->objsize; 3118 3119 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3120 up_write(&slub_lock); 3121 3122 if (sysfs_slab_alias(s, name)) 3123 goto err; 3124 return s; 3125 } 3126 3127 s = kmalloc(kmem_size, GFP_KERNEL); 3128 if (s) { 3129 if (kmem_cache_open(s, GFP_KERNEL, name, 3130 size, align, flags, ctor)) { 3131 list_add(&s->list, &slab_caches); 3132 up_write(&slub_lock); 3133 if (sysfs_slab_add(s)) 3134 goto err; 3135 return s; 3136 } 3137 kfree(s); 3138 } 3139 up_write(&slub_lock); 3140 3141err: 3142 if (flags & SLAB_PANIC) 3143 panic("Cannot create slabcache %s\n", name); 3144 else 3145 s = NULL; 3146 return s; 3147} 3148EXPORT_SYMBOL(kmem_cache_create); 3149 3150#ifdef CONFIG_SMP 3151/* 3152 * Use the cpu notifier to insure that the cpu slabs are flushed when 3153 * necessary. 
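 *
 * CPU_UP_PREPARE allocates the per cpu structures for every cache on
 * the incoming cpu; CPU_UP_CANCELED and CPU_DEAD flush that cpu's
 * slab back to the lists and free the per cpu structures again.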
3154 */ 3155static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3156 unsigned long action, void *hcpu) 3157{ 3158 long cpu = (long)hcpu; 3159 struct kmem_cache *s; 3160 unsigned long flags; 3161 3162 switch (action) { 3163 case CPU_UP_PREPARE: 3164 case CPU_UP_PREPARE_FROZEN: 3165 init_alloc_cpu_cpu(cpu); 3166 down_read(&slub_lock); 3167 list_for_each_entry(s, &slab_caches, list) 3168 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu, 3169 GFP_KERNEL); 3170 up_read(&slub_lock); 3171 break; 3172 3173 case CPU_UP_CANCELED: 3174 case CPU_UP_CANCELED_FROZEN: 3175 case CPU_DEAD: 3176 case CPU_DEAD_FROZEN: 3177 down_read(&slub_lock); 3178 list_for_each_entry(s, &slab_caches, list) { 3179 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3180 3181 local_irq_save(flags); 3182 __flush_cpu_slab(s, cpu); 3183 local_irq_restore(flags); 3184 free_kmem_cache_cpu(c, cpu); 3185 s->cpu_slab[cpu] = NULL; 3186 } 3187 up_read(&slub_lock); 3188 break; 3189 default: 3190 break; 3191 } 3192 return NOTIFY_OK; 3193} 3194 3195static struct notifier_block __cpuinitdata slab_notifier = { 3196 .notifier_call = slab_cpuup_callback 3197}; 3198 3199#endif 3200 3201void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) 3202{ 3203 struct kmem_cache *s; 3204 3205 if (unlikely(size > PAGE_SIZE)) 3206 return kmalloc_large(size, gfpflags); 3207 3208 s = get_slab(size, gfpflags); 3209 3210 if (unlikely(ZERO_OR_NULL_PTR(s))) 3211 return s; 3212 3213 return slab_alloc(s, gfpflags, -1, caller); 3214} 3215 3216void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3217 int node, void *caller) 3218{ 3219 struct kmem_cache *s; 3220 3221 if (unlikely(size > PAGE_SIZE)) 3222 return kmalloc_large_node(size, gfpflags, node); 3223 3224 s = get_slab(size, gfpflags); 3225 3226 if (unlikely(ZERO_OR_NULL_PTR(s))) 3227 return s; 3228 3229 return slab_alloc(s, gfpflags, node, caller); 3230} 3231 3232#ifdef CONFIG_SLUB_DEBUG 3233static unsigned long count_partial(struct kmem_cache_node *n, 3234 int (*get_count)(struct page *)) 3235{ 3236 unsigned long flags; 3237 unsigned long x = 0; 3238 struct page *page; 3239 3240 spin_lock_irqsave(&n->list_lock, flags); 3241 list_for_each_entry(page, &n->partial, lru) 3242 x += get_count(page); 3243 spin_unlock_irqrestore(&n->list_lock, flags); 3244 return x; 3245} 3246 3247static int count_inuse(struct page *page) 3248{ 3249 return page->inuse; 3250} 3251 3252static int count_total(struct page *page) 3253{ 3254 return page->objects; 3255} 3256 3257static int count_free(struct page *page) 3258{ 3259 return page->objects - page->inuse; 3260} 3261 3262static int validate_slab(struct kmem_cache *s, struct page *page, 3263 unsigned long *map) 3264{ 3265 void *p; 3266 void *addr = page_address(page); 3267 3268 if (!check_slab(s, page) || 3269 !on_freelist(s, page, NULL)) 3270 return 0; 3271 3272 /* Now we know that a valid freelist exists */ 3273 bitmap_zero(map, page->objects); 3274 3275 for_each_free_object(p, s, page->freelist) { 3276 set_bit(slab_index(p, s, addr), map); 3277 if (!check_object(s, page, p, 0)) 3278 return 0; 3279 } 3280 3281 for_each_object(p, s, addr, page->objects) 3282 if (!test_bit(slab_index(p, s, addr), map)) 3283 if (!check_object(s, page, p, 1)) 3284 return 0; 3285 return 1; 3286} 3287 3288static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3289 unsigned long *map) 3290{ 3291 if (slab_trylock(page)) { 3292 validate_slab(s, page, map); 3293 slab_unlock(page); 3294 } else 3295 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 
3296 s->name, page); 3297 3298 if (s->flags & DEBUG_DEFAULT_FLAGS) { 3299 if (!PageSlubDebug(page)) 3300 printk(KERN_ERR "SLUB %s: SlubDebug not set " 3301 "on slab 0x%p\n", s->name, page); 3302 } else { 3303 if (PageSlubDebug(page)) 3304 printk(KERN_ERR "SLUB %s: SlubDebug set on " 3305 "slab 0x%p\n", s->name, page); 3306 } 3307} 3308 3309static int validate_slab_node(struct kmem_cache *s, 3310 struct kmem_cache_node *n, unsigned long *map) 3311{ 3312 unsigned long count = 0; 3313 struct page *page; 3314 unsigned long flags; 3315 3316 spin_lock_irqsave(&n->list_lock, flags); 3317 3318 list_for_each_entry(page, &n->partial, lru) { 3319 validate_slab_slab(s, page, map); 3320 count++; 3321 } 3322 if (count != n->nr_partial) 3323 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3324 "counter=%ld\n", s->name, count, n->nr_partial); 3325 3326 if (!(s->flags & SLAB_STORE_USER)) 3327 goto out; 3328 3329 list_for_each_entry(page, &n->full, lru) { 3330 validate_slab_slab(s, page, map); 3331 count++; 3332 } 3333 if (count != atomic_long_read(&n->nr_slabs)) 3334 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3335 "counter=%ld\n", s->name, count, 3336 atomic_long_read(&n->nr_slabs)); 3337 3338out: 3339 spin_unlock_irqrestore(&n->list_lock, flags); 3340 return count; 3341} 3342 3343static long validate_slab_cache(struct kmem_cache *s) 3344{ 3345 int node; 3346 unsigned long count = 0; 3347 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3348 sizeof(unsigned long), GFP_KERNEL); 3349 3350 if (!map) 3351 return -ENOMEM; 3352 3353 flush_all(s); 3354 for_each_node_state(node, N_NORMAL_MEMORY) { 3355 struct kmem_cache_node *n = get_node(s, node); 3356 3357 count += validate_slab_node(s, n, map); 3358 } 3359 kfree(map); 3360 return count; 3361} 3362 3363#ifdef SLUB_RESILIENCY_TEST 3364static void resiliency_test(void) 3365{ 3366 u8 *p; 3367 3368 printk(KERN_ERR "SLUB resiliency testing\n"); 3369 printk(KERN_ERR "-----------------------\n"); 3370 printk(KERN_ERR "A. Corruption after allocation\n"); 3371 3372 p = kzalloc(16, GFP_KERNEL); 3373 p[16] = 0x12; 3374 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 3375 " 0x12->0x%p\n\n", p + 16); 3376 3377 validate_slab_cache(kmalloc_caches + 4); 3378 3379 /* Hmmm... The next two are dangerous */ 3380 p = kzalloc(32, GFP_KERNEL); 3381 p[32 + sizeof(void *)] = 0x34; 3382 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 3383 " 0x34 -> -0x%p\n", p); 3384 printk(KERN_ERR 3385 "If allocated object is overwritten then not detectable\n\n"); 3386 3387 validate_slab_cache(kmalloc_caches + 5); 3388 p = kzalloc(64, GFP_KERNEL); 3389 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3390 *p = 0x56; 3391 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3392 p); 3393 printk(KERN_ERR 3394 "If allocated object is overwritten then not detectable\n\n"); 3395 validate_slab_cache(kmalloc_caches + 6); 3396 3397 printk(KERN_ERR "\nB. Corruption after free\n"); 3398 p = kzalloc(128, GFP_KERNEL); 3399 kfree(p); 3400 *p = 0x78; 3401 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3402 validate_slab_cache(kmalloc_caches + 7); 3403 3404 p = kzalloc(256, GFP_KERNEL); 3405 kfree(p); 3406 p[50] = 0x9a; 3407 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3408 p); 3409 validate_slab_cache(kmalloc_caches + 8); 3410 3411 p = kzalloc(512, GFP_KERNEL); 3412 kfree(p); 3413 p[512] = 0xab; 3414 printk(KERN_ERR "\n3. 
kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3415 validate_slab_cache(kmalloc_caches + 9); 3416} 3417#else 3418static void resiliency_test(void) {}; 3419#endif 3420 3421/* 3422 * Generate lists of code addresses where slabcache objects are allocated 3423 * and freed. 3424 */ 3425 3426struct location { 3427 unsigned long count; 3428 void *addr; 3429 long long sum_time; 3430 long min_time; 3431 long max_time; 3432 long min_pid; 3433 long max_pid; 3434 cpumask_t cpus; 3435 nodemask_t nodes; 3436}; 3437 3438struct loc_track { 3439 unsigned long max; 3440 unsigned long count; 3441 struct location *loc; 3442}; 3443 3444static void free_loc_track(struct loc_track *t) 3445{ 3446 if (t->max) 3447 free_pages((unsigned long)t->loc, 3448 get_order(sizeof(struct location) * t->max)); 3449} 3450 3451static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3452{ 3453 struct location *l; 3454 int order; 3455 3456 order = get_order(sizeof(struct location) * max); 3457 3458 l = (void *)__get_free_pages(flags, order); 3459 if (!l) 3460 return 0; 3461 3462 if (t->count) { 3463 memcpy(l, t->loc, sizeof(struct location) * t->count); 3464 free_loc_track(t); 3465 } 3466 t->max = max; 3467 t->loc = l; 3468 return 1; 3469} 3470 3471static int add_location(struct loc_track *t, struct kmem_cache *s, 3472 const struct track *track) 3473{ 3474 long start, end, pos; 3475 struct location *l; 3476 void *caddr; 3477 unsigned long age = jiffies - track->when; 3478 3479 start = -1; 3480 end = t->count; 3481 3482 for ( ; ; ) { 3483 pos = start + (end - start + 1) / 2; 3484 3485 /* 3486 * There is nothing at "end". If we end up there 3487 * we need to add something to before end. 3488 */ 3489 if (pos == end) 3490 break; 3491 3492 caddr = t->loc[pos].addr; 3493 if (track->addr == caddr) { 3494 3495 l = &t->loc[pos]; 3496 l->count++; 3497 if (track->when) { 3498 l->sum_time += age; 3499 if (age < l->min_time) 3500 l->min_time = age; 3501 if (age > l->max_time) 3502 l->max_time = age; 3503 3504 if (track->pid < l->min_pid) 3505 l->min_pid = track->pid; 3506 if (track->pid > l->max_pid) 3507 l->max_pid = track->pid; 3508 3509 cpu_set(track->cpu, l->cpus); 3510 } 3511 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3512 return 1; 3513 } 3514 3515 if (track->addr < caddr) 3516 end = pos; 3517 else 3518 start = pos; 3519 } 3520 3521 /* 3522 * Not found. Insert new tracking element. 
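 *
 * The bisection above terminated with pos == end, i.e. the slot that
 * keeps t->loc sorted by address, so grow the array if required and
 * memmove() the tail up by one element below.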
3523 */ 3524 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3525 return 0; 3526 3527 l = t->loc + pos; 3528 if (pos < t->count) 3529 memmove(l + 1, l, 3530 (t->count - pos) * sizeof(struct location)); 3531 t->count++; 3532 l->count = 1; 3533 l->addr = track->addr; 3534 l->sum_time = age; 3535 l->min_time = age; 3536 l->max_time = age; 3537 l->min_pid = track->pid; 3538 l->max_pid = track->pid; 3539 cpus_clear(l->cpus); 3540 cpu_set(track->cpu, l->cpus); 3541 nodes_clear(l->nodes); 3542 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3543 return 1; 3544} 3545 3546static void process_slab(struct loc_track *t, struct kmem_cache *s, 3547 struct page *page, enum track_item alloc) 3548{ 3549 void *addr = page_address(page); 3550 DECLARE_BITMAP(map, page->objects); 3551 void *p; 3552 3553 bitmap_zero(map, page->objects); 3554 for_each_free_object(p, s, page->freelist) 3555 set_bit(slab_index(p, s, addr), map); 3556 3557 for_each_object(p, s, addr, page->objects) 3558 if (!test_bit(slab_index(p, s, addr), map)) 3559 add_location(t, s, get_track(s, p, alloc)); 3560} 3561 3562static int list_locations(struct kmem_cache *s, char *buf, 3563 enum track_item alloc) 3564{ 3565 int len = 0; 3566 unsigned long i; 3567 struct loc_track t = { 0, 0, NULL }; 3568 int node; 3569 3570 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3571 GFP_TEMPORARY)) 3572 return sprintf(buf, "Out of memory\n"); 3573 3574 /* Push back cpu slabs */ 3575 flush_all(s); 3576 3577 for_each_node_state(node, N_NORMAL_MEMORY) { 3578 struct kmem_cache_node *n = get_node(s, node); 3579 unsigned long flags; 3580 struct page *page; 3581 3582 if (!atomic_long_read(&n->nr_slabs)) 3583 continue; 3584 3585 spin_lock_irqsave(&n->list_lock, flags); 3586 list_for_each_entry(page, &n->partial, lru) 3587 process_slab(&t, s, page, alloc); 3588 list_for_each_entry(page, &n->full, lru) 3589 process_slab(&t, s, page, alloc); 3590 spin_unlock_irqrestore(&n->list_lock, flags); 3591 } 3592 3593 for (i = 0; i < t.count; i++) { 3594 struct location *l = &t.loc[i]; 3595 3596 if (len > PAGE_SIZE - 100) 3597 break; 3598 len += sprintf(buf + len, "%7ld ", l->count); 3599 3600 if (l->addr) 3601 len += sprint_symbol(buf + len, (unsigned long)l->addr); 3602 else 3603 len += sprintf(buf + len, "<not-available>"); 3604 3605 if (l->sum_time != l->min_time) { 3606 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3607 l->min_time, 3608 (long)div_u64(l->sum_time, l->count), 3609 l->max_time); 3610 } else 3611 len += sprintf(buf + len, " age=%ld", 3612 l->min_time); 3613 3614 if (l->min_pid != l->max_pid) 3615 len += sprintf(buf + len, " pid=%ld-%ld", 3616 l->min_pid, l->max_pid); 3617 else 3618 len += sprintf(buf + len, " pid=%ld", 3619 l->min_pid); 3620 3621 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && 3622 len < PAGE_SIZE - 60) { 3623 len += sprintf(buf + len, " cpus="); 3624 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3625 l->cpus); 3626 } 3627 3628 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && 3629 len < PAGE_SIZE - 60) { 3630 len += sprintf(buf + len, " nodes="); 3631 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3632 l->nodes); 3633 } 3634 3635 len += sprintf(buf + len, "\n"); 3636 } 3637 3638 free_loc_track(&t); 3639 if (!t.count) 3640 len += sprintf(buf, "No data\n"); 3641 return len; 3642} 3643 3644enum slab_stat_type { 3645 SL_ALL, /* All slabs */ 3646 SL_PARTIAL, /* Only partially allocated slabs */ 3647 SL_CPU, /* Only slabs used for cpu caches */ 3648 SL_OBJECTS, /* 
Determine allocated objects not slabs */ 3649 SL_TOTAL /* Determine object capacity not slabs */ 3650}; 3651 3652#define SO_ALL (1 << SL_ALL) 3653#define SO_PARTIAL (1 << SL_PARTIAL) 3654#define SO_CPU (1 << SL_CPU) 3655#define SO_OBJECTS (1 << SL_OBJECTS) 3656#define SO_TOTAL (1 << SL_TOTAL) 3657 3658static ssize_t show_slab_objects(struct kmem_cache *s, 3659 char *buf, unsigned long flags) 3660{ 3661 unsigned long total = 0; 3662 int node; 3663 int x; 3664 unsigned long *nodes; 3665 unsigned long *per_cpu; 3666 3667 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3668 if (!nodes) 3669 return -ENOMEM; 3670 per_cpu = nodes + nr_node_ids; 3671 3672 if (flags & SO_CPU) { 3673 int cpu; 3674 3675 for_each_possible_cpu(cpu) { 3676 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3677 3678 if (!c || c->node < 0) 3679 continue; 3680 3681 if (c->page) { 3682 if (flags & SO_TOTAL) 3683 x = c->page->objects; 3684 else if (flags & SO_OBJECTS) 3685 x = c->page->inuse; 3686 else 3687 x = 1; 3688 3689 total += x; 3690 nodes[c->node] += x; 3691 } 3692 per_cpu[c->node]++; 3693 } 3694 } 3695 3696 if (flags & SO_ALL) { 3697 for_each_node_state(node, N_NORMAL_MEMORY) { 3698 struct kmem_cache_node *n = get_node(s, node); 3699 3700 if (flags & SO_TOTAL) 3701 x = atomic_long_read(&n->total_objects); 3702 else if (flags & SO_OBJECTS) 3703 x = atomic_long_read(&n->total_objects) - 3704 count_partial(n, count_free); 3705 3706 else 3707 x = atomic_long_read(&n->nr_slabs); 3708 total += x; 3709 nodes[node] += x; 3710 } 3711 3712 } else if (flags & SO_PARTIAL) { 3713 for_each_node_state(node, N_NORMAL_MEMORY) { 3714 struct kmem_cache_node *n = get_node(s, node); 3715 3716 if (flags & SO_TOTAL) 3717 x = count_partial(n, count_total); 3718 else if (flags & SO_OBJECTS) 3719 x = count_partial(n, count_inuse); 3720 else 3721 x = n->nr_partial; 3722 total += x; 3723 nodes[node] += x; 3724 } 3725 } 3726 x = sprintf(buf, "%lu", total); 3727#ifdef CONFIG_NUMA 3728 for_each_node_state(node, N_NORMAL_MEMORY) 3729 if (nodes[node]) 3730 x += sprintf(buf + x, " N%d=%lu", 3731 node, nodes[node]); 3732#endif 3733 kfree(nodes); 3734 return x + sprintf(buf + x, "\n"); 3735} 3736 3737static int any_slab_objects(struct kmem_cache *s) 3738{ 3739 int node; 3740 3741 for_each_online_node(node) { 3742 struct kmem_cache_node *n = get_node(s, node); 3743 3744 if (!n) 3745 continue; 3746 3747 if (atomic_long_read(&n->total_objects)) 3748 return 1; 3749 } 3750 return 0; 3751} 3752 3753#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3754#define to_slab(n) container_of(n, struct kmem_cache, kobj); 3755 3756struct slab_attribute { 3757 struct attribute attr; 3758 ssize_t (*show)(struct kmem_cache *s, char *buf); 3759 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3760}; 3761 3762#define SLAB_ATTR_RO(_name) \ 3763 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3764 3765#define SLAB_ATTR(_name) \ 3766 static struct slab_attribute _name##_attr = \ 3767 __ATTR(_name, 0644, _name##_show, _name##_store) 3768 3769static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3770{ 3771 return sprintf(buf, "%d\n", s->size); 3772} 3773SLAB_ATTR_RO(slab_size); 3774 3775static ssize_t align_show(struct kmem_cache *s, char *buf) 3776{ 3777 return sprintf(buf, "%d\n", s->align); 3778} 3779SLAB_ATTR_RO(align); 3780 3781static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3782{ 3783 return sprintf(buf, "%d\n", s->objsize); 3784} 3785SLAB_ATTR_RO(object_size); 3786 
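/*
 * Each SLAB_ATTR()/SLAB_ATTR_RO() definition in this section becomes a
 * file under /sys/kernel/slab/<cache>/. For example (illustrative):
 *
 *	# cat /sys/kernel/slab/kmalloc-64/object_size
 *	64
 */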
3787static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3788{ 3789 return sprintf(buf, "%d\n", oo_objects(s->oo)); 3790} 3791SLAB_ATTR_RO(objs_per_slab); 3792 3793static ssize_t order_store(struct kmem_cache *s, 3794 const char *buf, size_t length) 3795{ 3796 unsigned long order; 3797 int err; 3798 3799 err = strict_strtoul(buf, 10, &order); 3800 if (err) 3801 return err; 3802 3803 if (order > slub_max_order || order < slub_min_order) 3804 return -EINVAL; 3805 3806 calculate_sizes(s, order); 3807 return length; 3808} 3809 3810static ssize_t order_show(struct kmem_cache *s, char *buf) 3811{ 3812 return sprintf(buf, "%d\n", oo_order(s->oo)); 3813} 3814SLAB_ATTR(order); 3815 3816static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3817{ 3818 if (s->ctor) { 3819 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3820 3821 return n + sprintf(buf + n, "\n"); 3822 } 3823 return 0; 3824} 3825SLAB_ATTR_RO(ctor); 3826 3827static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3828{ 3829 return sprintf(buf, "%d\n", s->refcount - 1); 3830} 3831SLAB_ATTR_RO(aliases); 3832 3833static ssize_t slabs_show(struct kmem_cache *s, char *buf) 3834{ 3835 return show_slab_objects(s, buf, SO_ALL); 3836} 3837SLAB_ATTR_RO(slabs); 3838 3839static ssize_t partial_show(struct kmem_cache *s, char *buf) 3840{ 3841 return show_slab_objects(s, buf, SO_PARTIAL); 3842} 3843SLAB_ATTR_RO(partial); 3844 3845static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3846{ 3847 return show_slab_objects(s, buf, SO_CPU); 3848} 3849SLAB_ATTR_RO(cpu_slabs); 3850 3851static ssize_t objects_show(struct kmem_cache *s, char *buf) 3852{ 3853 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 3854} 3855SLAB_ATTR_RO(objects); 3856 3857static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 3858{ 3859 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 3860} 3861SLAB_ATTR_RO(objects_partial); 3862 3863static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 3864{ 3865 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 3866} 3867SLAB_ATTR_RO(total_objects); 3868 3869static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 3870{ 3871 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 3872} 3873 3874static ssize_t sanity_checks_store(struct kmem_cache *s, 3875 const char *buf, size_t length) 3876{ 3877 s->flags &= ~SLAB_DEBUG_FREE; 3878 if (buf[0] == '1') 3879 s->flags |= SLAB_DEBUG_FREE; 3880 return length; 3881} 3882SLAB_ATTR(sanity_checks); 3883 3884static ssize_t trace_show(struct kmem_cache *s, char *buf) 3885{ 3886 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 3887} 3888 3889static ssize_t trace_store(struct kmem_cache *s, const char *buf, 3890 size_t length) 3891{ 3892 s->flags &= ~SLAB_TRACE; 3893 if (buf[0] == '1') 3894 s->flags |= SLAB_TRACE; 3895 return length; 3896} 3897SLAB_ATTR(trace); 3898 3899static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 3900{ 3901 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 3902} 3903 3904static ssize_t reclaim_account_store(struct kmem_cache *s, 3905 const char *buf, size_t length) 3906{ 3907 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 3908 if (buf[0] == '1') 3909 s->flags |= SLAB_RECLAIM_ACCOUNT; 3910 return length; 3911} 3912SLAB_ATTR(reclaim_account); 3913 3914static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 3915{ 3916 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 3917} 3918SLAB_ATTR_RO(hwcache_align); 3919 3920#ifdef CONFIG_ZONE_DMA 3921static 
ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 3922{ 3923 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 3924} 3925SLAB_ATTR_RO(cache_dma); 3926#endif 3927 3928static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 3929{ 3930 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 3931} 3932SLAB_ATTR_RO(destroy_by_rcu); 3933 3934static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 3935{ 3936 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 3937} 3938 3939static ssize_t red_zone_store(struct kmem_cache *s, 3940 const char *buf, size_t length) 3941{ 3942 if (any_slab_objects(s)) 3943 return -EBUSY; 3944 3945 s->flags &= ~SLAB_RED_ZONE; 3946 if (buf[0] == '1') 3947 s->flags |= SLAB_RED_ZONE; 3948 calculate_sizes(s, -1); 3949 return length; 3950} 3951SLAB_ATTR(red_zone); 3952 3953static ssize_t poison_show(struct kmem_cache *s, char *buf) 3954{ 3955 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 3956} 3957 3958static ssize_t poison_store(struct kmem_cache *s, 3959 const char *buf, size_t length) 3960{ 3961 if (any_slab_objects(s)) 3962 return -EBUSY; 3963 3964 s->flags &= ~SLAB_POISON; 3965 if (buf[0] == '1') 3966 s->flags |= SLAB_POISON; 3967 calculate_sizes(s, -1); 3968 return length; 3969} 3970SLAB_ATTR(poison); 3971 3972static ssize_t store_user_show(struct kmem_cache *s, char *buf) 3973{ 3974 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 3975} 3976 3977static ssize_t store_user_store(struct kmem_cache *s, 3978 const char *buf, size_t length) 3979{ 3980 if (any_slab_objects(s)) 3981 return -EBUSY; 3982 3983 s->flags &= ~SLAB_STORE_USER; 3984 if (buf[0] == '1') 3985 s->flags |= SLAB_STORE_USER; 3986 calculate_sizes(s, -1); 3987 return length; 3988} 3989SLAB_ATTR(store_user); 3990 3991static ssize_t validate_show(struct kmem_cache *s, char *buf) 3992{ 3993 return 0; 3994} 3995 3996static ssize_t validate_store(struct kmem_cache *s, 3997 const char *buf, size_t length) 3998{ 3999 int ret = -EINVAL; 4000 4001 if (buf[0] == '1') { 4002 ret = validate_slab_cache(s); 4003 if (ret >= 0) 4004 ret = length; 4005 } 4006 return ret; 4007} 4008SLAB_ATTR(validate); 4009 4010static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4011{ 4012 return 0; 4013} 4014 4015static ssize_t shrink_store(struct kmem_cache *s, 4016 const char *buf, size_t length) 4017{ 4018 if (buf[0] == '1') { 4019 int rc = kmem_cache_shrink(s); 4020 4021 if (rc) 4022 return rc; 4023 } else 4024 return -EINVAL; 4025 return length; 4026} 4027SLAB_ATTR(shrink); 4028 4029static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4030{ 4031 if (!(s->flags & SLAB_STORE_USER)) 4032 return -ENOSYS; 4033 return list_locations(s, buf, TRACK_ALLOC); 4034} 4035SLAB_ATTR_RO(alloc_calls); 4036 4037static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4038{ 4039 if (!(s->flags & SLAB_STORE_USER)) 4040 return -ENOSYS; 4041 return list_locations(s, buf, TRACK_FREE); 4042} 4043SLAB_ATTR_RO(free_calls); 4044 4045#ifdef CONFIG_NUMA 4046static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4047{ 4048 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4049} 4050 4051static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4052 const char *buf, size_t length) 4053{ 4054 unsigned long ratio; 4055 int err; 4056 4057 err = strict_strtoul(buf, 10, &ratio); 4058 if (err) 4059 return err; 4060 4061 if (ratio < 100) 4062 s->remote_node_defrag_ratio = ratio * 10; 4063 4064 return length; 4065} 
4066SLAB_ATTR(remote_node_defrag_ratio); 4067#endif 4068 4069#ifdef CONFIG_SLUB_STATS 4070static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4071{ 4072 unsigned long sum = 0; 4073 int cpu; 4074 int len; 4075 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4076 4077 if (!data) 4078 return -ENOMEM; 4079 4080 for_each_online_cpu(cpu) { 4081 unsigned x = get_cpu_slab(s, cpu)->stat[si]; 4082 4083 data[cpu] = x; 4084 sum += x; 4085 } 4086 4087 len = sprintf(buf, "%lu", sum); 4088 4089#ifdef CONFIG_SMP 4090 for_each_online_cpu(cpu) { 4091 if (data[cpu] && len < PAGE_SIZE - 20) 4092 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4093 } 4094#endif 4095 kfree(data); 4096 return len + sprintf(buf + len, "\n"); 4097} 4098 4099#define STAT_ATTR(si, text) \ 4100static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4101{ \ 4102 return show_stat(s, buf, si); \ 4103} \ 4104SLAB_ATTR_RO(text); \ 4105 4106STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4107STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4108STAT_ATTR(FREE_FASTPATH, free_fastpath); 4109STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4110STAT_ATTR(FREE_FROZEN, free_frozen); 4111STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4112STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4113STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4114STAT_ATTR(ALLOC_SLAB, alloc_slab); 4115STAT_ATTR(ALLOC_REFILL, alloc_refill); 4116STAT_ATTR(FREE_SLAB, free_slab); 4117STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 4118STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 4119STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 4120STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4121STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4122STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4123STAT_ATTR(ORDER_FALLBACK, order_fallback); 4124#endif 4125 4126static struct attribute *slab_attrs[] = { 4127 &slab_size_attr.attr, 4128 &object_size_attr.attr, 4129 &objs_per_slab_attr.attr, 4130 &order_attr.attr, 4131 &objects_attr.attr, 4132 &objects_partial_attr.attr, 4133 &total_objects_attr.attr, 4134 &slabs_attr.attr, 4135 &partial_attr.attr, 4136 &cpu_slabs_attr.attr, 4137 &ctor_attr.attr, 4138 &aliases_attr.attr, 4139 &align_attr.attr, 4140 &sanity_checks_attr.attr, 4141 &trace_attr.attr, 4142 &hwcache_align_attr.attr, 4143 &reclaim_account_attr.attr, 4144 &destroy_by_rcu_attr.attr, 4145 &red_zone_attr.attr, 4146 &poison_attr.attr, 4147 &store_user_attr.attr, 4148 &validate_attr.attr, 4149 &shrink_attr.attr, 4150 &alloc_calls_attr.attr, 4151 &free_calls_attr.attr, 4152#ifdef CONFIG_ZONE_DMA 4153 &cache_dma_attr.attr, 4154#endif 4155#ifdef CONFIG_NUMA 4156 &remote_node_defrag_ratio_attr.attr, 4157#endif 4158#ifdef CONFIG_SLUB_STATS 4159 &alloc_fastpath_attr.attr, 4160 &alloc_slowpath_attr.attr, 4161 &free_fastpath_attr.attr, 4162 &free_slowpath_attr.attr, 4163 &free_frozen_attr.attr, 4164 &free_add_partial_attr.attr, 4165 &free_remove_partial_attr.attr, 4166 &alloc_from_partial_attr.attr, 4167 &alloc_slab_attr.attr, 4168 &alloc_refill_attr.attr, 4169 &free_slab_attr.attr, 4170 &cpuslab_flush_attr.attr, 4171 &deactivate_full_attr.attr, 4172 &deactivate_empty_attr.attr, 4173 &deactivate_to_head_attr.attr, 4174 &deactivate_to_tail_attr.attr, 4175 &deactivate_remote_frees_attr.attr, 4176 &order_fallback_attr.attr, 4177#endif 4178 NULL 4179}; 4180 4181static struct attribute_group slab_attr_group = { 4182 .attrs = slab_attrs, 4183}; 4184 4185static ssize_t slab_attr_show(struct kobject *kobj, 4186 struct attribute *attr, 4187 char *buf) 4188{ 
4189 struct slab_attribute *attribute; 4190 struct kmem_cache *s; 4191 int err; 4192 4193 attribute = to_slab_attr(attr); 4194 s = to_slab(kobj); 4195 4196 if (!attribute->show) 4197 return -EIO; 4198 4199 err = attribute->show(s, buf); 4200 4201 return err; 4202} 4203 4204static ssize_t slab_attr_store(struct kobject *kobj, 4205 struct attribute *attr, 4206 const char *buf, size_t len) 4207{ 4208 struct slab_attribute *attribute; 4209 struct kmem_cache *s; 4210 int err; 4211 4212 attribute = to_slab_attr(attr); 4213 s = to_slab(kobj); 4214 4215 if (!attribute->store) 4216 return -EIO; 4217 4218 err = attribute->store(s, buf, len); 4219 4220 return err; 4221} 4222 4223static void kmem_cache_release(struct kobject *kobj) 4224{ 4225 struct kmem_cache *s = to_slab(kobj); 4226 4227 kfree(s); 4228} 4229 4230static struct sysfs_ops slab_sysfs_ops = { 4231 .show = slab_attr_show, 4232 .store = slab_attr_store, 4233}; 4234 4235static struct kobj_type slab_ktype = { 4236 .sysfs_ops = &slab_sysfs_ops, 4237 .release = kmem_cache_release 4238}; 4239 4240static int uevent_filter(struct kset *kset, struct kobject *kobj) 4241{ 4242 struct kobj_type *ktype = get_ktype(kobj); 4243 4244 if (ktype == &slab_ktype) 4245 return 1; 4246 return 0; 4247} 4248 4249static struct kset_uevent_ops slab_uevent_ops = { 4250 .filter = uevent_filter, 4251}; 4252 4253static struct kset *slab_kset; 4254 4255#define ID_STR_LENGTH 64 4256 4257/* Create a unique string id for a slab cache: 4258 * 4259 * Format :[flags-]size 4260 */ 4261static char *create_unique_id(struct kmem_cache *s) 4262{ 4263 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 4264 char *p = name; 4265 4266 BUG_ON(!name); 4267 4268 *p++ = ':'; 4269 /* 4270 * First flags affecting slabcache operations. We will only 4271 * get here for aliasable slabs so we do not need to support 4272 * too many flags. The flags here must cover all flags that 4273 * are matched during merging to guarantee that the id is 4274 * unique. 4275 */ 4276 if (s->flags & SLAB_CACHE_DMA) 4277 *p++ = 'd'; 4278 if (s->flags & SLAB_RECLAIM_ACCOUNT) 4279 *p++ = 'a'; 4280 if (s->flags & SLAB_DEBUG_FREE) 4281 *p++ = 'F'; 4282 if (p != name + 1) 4283 *p++ = '-'; 4284 p += sprintf(p, "%07d", s->size); 4285 BUG_ON(p > name + ID_STR_LENGTH - 1); 4286 return name; 4287} 4288 4289static int sysfs_slab_add(struct kmem_cache *s) 4290{ 4291 int err; 4292 const char *name; 4293 int unmergeable; 4294 4295 if (slab_state < SYSFS) 4296 /* Defer until later */ 4297 return 0; 4298 4299 unmergeable = slab_unmergeable(s); 4300 if (unmergeable) { 4301 /* 4302 * Slabcache can never be merged so we can use the name proper. 4303 * This is typically the case for debug situations. In that 4304 * case we can catch duplicate names easily. 4305 */ 4306 sysfs_remove_link(&slab_kset->kobj, s->name); 4307 name = s->name; 4308 } else { 4309 /* 4310 * Create a unique name for the slab as a target 4311 * for the symlinks. 
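 *
 * create_unique_id() encodes the merge-relevant flags plus the slab
 * size, e.g. a SLAB_CACHE_DMA cache of size 192 yields the string
 * ":d-0000192" (illustrative).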
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err) {
		/* Unwind the registration so the kobject is not leaked. */
		kobject_del(&s->kobj);
		kobject_put(&s->kobj);
		return err;
	}
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EINVAL;
}

static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}
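/*
 * Emit one line per cache in the SLAB-compatible format declared by
 * print_slabinfo_header() above. Example line (values made up):
 *
 *	kmalloc-64         1234   1280     64   64    1 : tunables \
 *		0    0    0 : slabdata     20     20      0
 *
 * SLUB has no tunables and no shared arrays, so those columns are
 * always reported as zero.
 */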
static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#endif /* CONFIG_SLABINFO */
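/*
 * Example (lives in fs/proc, shown here only for context): the proc
 * code is expected to serve /proc/slabinfo through the seq_file
 * interface roughly as follows; the exact glue may differ.
 *
 *	static int slabinfo_open(struct inode *inode, struct file *file)
 *	{
 *		return seq_open(file, &slabinfo_op);
 *	}
 */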