slab.c revision 55935a34a428a1497e3b37982e2782c09c6f914d
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change;
 *  they are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com> 81 * Alok N Kataria <alokk@calsoftinc.com> 82 * Christoph Lameter <christoph@lameter.com> 83 * 84 * Modified the slab allocator to be node aware on NUMA systems. 85 * Each node has its own list of partial, free and full slabs. 86 * All object allocations for a node occur from node specific slab lists. 87 */ 88 89#include <linux/slab.h> 90#include <linux/mm.h> 91#include <linux/poison.h> 92#include <linux/swap.h> 93#include <linux/cache.h> 94#include <linux/interrupt.h> 95#include <linux/init.h> 96#include <linux/compiler.h> 97#include <linux/cpuset.h> 98#include <linux/seq_file.h> 99#include <linux/notifier.h> 100#include <linux/kallsyms.h> 101#include <linux/cpu.h> 102#include <linux/sysctl.h> 103#include <linux/module.h> 104#include <linux/rcupdate.h> 105#include <linux/string.h> 106#include <linux/uaccess.h> 107#include <linux/nodemask.h> 108#include <linux/mempolicy.h> 109#include <linux/mutex.h> 110#include <linux/fault-inject.h> 111#include <linux/rtmutex.h> 112 113#include <asm/cacheflush.h> 114#include <asm/tlbflush.h> 115#include <asm/page.h> 116 117/* 118 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL, 119 * SLAB_RED_ZONE & SLAB_POISON. 120 * 0 for faster, smaller code (especially in the critical paths). 121 * 122 * STATS - 1 to collect stats for /proc/slabinfo. 123 * 0 for faster, smaller code (especially in the critical paths). 124 * 125 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) 126 */ 127 128#ifdef CONFIG_DEBUG_SLAB 129#define DEBUG 1 130#define STATS 1 131#define FORCED_DEBUG 1 132#else 133#define DEBUG 0 134#define STATS 0 135#define FORCED_DEBUG 0 136#endif 137 138/* Shouldn't this be in a header file somewhere? */ 139#define BYTES_PER_WORD sizeof(void *) 140 141#ifndef cache_line_size 142#define cache_line_size() L1_CACHE_BYTES 143#endif 144 145#ifndef ARCH_KMALLOC_MINALIGN 146/* 147 * Enforce a minimum alignment for the kmalloc caches. 148 * Usually, the kmalloc caches are cache_line_size() aligned, except when 149 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. 150 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 151 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that. 152 * Note that this flag disables some debug features. 153 */ 154#define ARCH_KMALLOC_MINALIGN 0 155#endif 156 157#ifndef ARCH_SLAB_MINALIGN 158/* 159 * Enforce a minimum alignment for all caches. 160 * Intended for archs that get misalignment faults even for BYTES_PER_WORD 161 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. 162 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables 163 * some debug features. 164 */ 165#define ARCH_SLAB_MINALIGN 0 166#endif 167 168#ifndef ARCH_KMALLOC_FLAGS 169#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 170#endif 171 172/* Legal flag mask for kmem_cache_create(). */ 173#if DEBUG 174# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \ 175 SLAB_POISON | SLAB_HWCACHE_ALIGN | \ 176 SLAB_CACHE_DMA | \ 177 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \ 178 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 179 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 180#else 181# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ 182 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \ 183 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 184 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 185#endif 186 187/* 188 * kmem_bufctl_t: 189 * 190 * Bufctl's are used for linking objs within a slab 191 * linked offsets. 
192 * 193 * This implementation relies on "struct page" for locating the cache & 194 * slab an object belongs to. 195 * This allows the bufctl structure to be small (one int), but limits 196 * the number of objects a slab (not a cache) can contain when off-slab 197 * bufctls are used. The limit is the size of the largest general cache 198 * that does not use off-slab slabs. 199 * For 32bit archs with 4 kB pages, is this 56. 200 * This is not serious, as it is only for large objects, when it is unwise 201 * to have too many per slab. 202 * Note: This limit can be raised by introducing a general cache whose size 203 * is less than 512 (PAGE_SIZE<<3), but greater than 256. 204 */ 205 206typedef unsigned int kmem_bufctl_t; 207#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) 208#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) 209#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) 210#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) 211 212/* 213 * struct slab 214 * 215 * Manages the objs in a slab. Placed either at the beginning of mem allocated 216 * for a slab, or allocated from an general cache. 217 * Slabs are chained into three list: fully used, partial, fully free slabs. 218 */ 219struct slab { 220 struct list_head list; 221 unsigned long colouroff; 222 void *s_mem; /* including colour offset */ 223 unsigned int inuse; /* num of objs active in slab */ 224 kmem_bufctl_t free; 225 unsigned short nodeid; 226}; 227 228/* 229 * struct slab_rcu 230 * 231 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to 232 * arrange for kmem_freepages to be called via RCU. This is useful if 233 * we need to approach a kernel structure obliquely, from its address 234 * obtained without the usual locking. We can lock the structure to 235 * stabilize it and check it's still at the given address, only if we 236 * can be sure that the memory has not been meanwhile reused for some 237 * other kind of object (which our subsystem's lock might corrupt). 238 * 239 * rcu_read_lock before reading the address, then rcu_read_unlock after 240 * taking the spinlock within the structure expected at that address. 241 * 242 * We assume struct slab_rcu can overlay struct slab when destroying. 243 */ 244struct slab_rcu { 245 struct rcu_head head; 246 struct kmem_cache *cachep; 247 void *addr; 248}; 249 250/* 251 * struct array_cache 252 * 253 * Purpose: 254 * - LIFO ordering, to hand out cache-warm objects from _alloc 255 * - reduce the number of linked list operations 256 * - reduce spinlock operations 257 * 258 * The limit is stored in the per-cpu structure to reduce the data cache 259 * footprint. 260 * 261 */ 262struct array_cache { 263 unsigned int avail; 264 unsigned int limit; 265 unsigned int batchcount; 266 unsigned int touched; 267 spinlock_t lock; 268 void *entry[0]; /* 269 * Must have this definition in here for the proper 270 * alignment of array_cache. Also simplifies accessing 271 * the entries. 272 * [0] is for gcc 2.95. It should really be []. 273 */ 274}; 275 276/* 277 * bootstrap: The caches do not work without cpuarrays anymore, but the 278 * cpuarrays are allocated from the generic caches... 279 */ 280#define BOOT_CPUCACHE_ENTRIES 1 281struct arraycache_init { 282 struct array_cache cache; 283 void *entries[BOOT_CPUCACHE_ENTRIES]; 284}; 285 286/* 287 * The slab lists for all objects. 
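 *
 * As a rough, illustrative sketch (not the exact code in this file), an
 * allocation that misses the per-cpu array is refilled from these lists
 * in the order partial -> free -> grow a new slab:
 *
 *	entry = l3->slabs_partial.next;
 *	if (entry == &l3->slabs_partial) {
 *		entry = l3->slabs_free.next;
 *		if (entry == &l3->slabs_free)
 *			goto must_grow;
 *	}
 *	slabp = list_entry(entry, struct slab, list);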
288 */ 289struct kmem_list3 { 290 struct list_head slabs_partial; /* partial list first, better asm code */ 291 struct list_head slabs_full; 292 struct list_head slabs_free; 293 unsigned long free_objects; 294 unsigned int free_limit; 295 unsigned int colour_next; /* Per-node cache coloring */ 296 spinlock_t list_lock; 297 struct array_cache *shared; /* shared per node */ 298 struct array_cache **alien; /* on other nodes */ 299 unsigned long next_reap; /* updated without locking */ 300 int free_touched; /* updated without locking */ 301}; 302 303/* 304 * Need this for bootstrapping a per node allocator. 305 */ 306#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) 307struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 308#define CACHE_CACHE 0 309#define SIZE_AC 1 310#define SIZE_L3 (1 + MAX_NUMNODES) 311 312static int drain_freelist(struct kmem_cache *cache, 313 struct kmem_list3 *l3, int tofree); 314static void free_block(struct kmem_cache *cachep, void **objpp, int len, 315 int node); 316static int enable_cpucache(struct kmem_cache *cachep); 317static void cache_reap(struct work_struct *unused); 318 319/* 320 * This function must be completely optimized away if a constant is passed to 321 * it. Mostly the same as what is in linux/slab.h except it returns an index. 322 */ 323static __always_inline int index_of(const size_t size) 324{ 325 extern void __bad_size(void); 326 327 if (__builtin_constant_p(size)) { 328 int i = 0; 329 330#define CACHE(x) \ 331 if (size <=x) \ 332 return i; \ 333 else \ 334 i++; 335#include "linux/kmalloc_sizes.h" 336#undef CACHE 337 __bad_size(); 338 } else 339 __bad_size(); 340 return 0; 341} 342 343static int slab_early_init = 1; 344 345#define INDEX_AC index_of(sizeof(struct arraycache_init)) 346#define INDEX_L3 index_of(sizeof(struct kmem_list3)) 347 348static void kmem_list3_init(struct kmem_list3 *parent) 349{ 350 INIT_LIST_HEAD(&parent->slabs_full); 351 INIT_LIST_HEAD(&parent->slabs_partial); 352 INIT_LIST_HEAD(&parent->slabs_free); 353 parent->shared = NULL; 354 parent->alien = NULL; 355 parent->colour_next = 0; 356 spin_lock_init(&parent->list_lock); 357 parent->free_objects = 0; 358 parent->free_touched = 0; 359} 360 361#define MAKE_LIST(cachep, listp, slab, nodeid) \ 362 do { \ 363 INIT_LIST_HEAD(listp); \ 364 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 365 } while (0) 366 367#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 368 do { \ 369 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 370 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 371 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 372 } while (0) 373 374/* 375 * struct kmem_cache 376 * 377 * manages a cache. 378 */ 379 380struct kmem_cache { 381/* 1) per-cpu data, touched during every alloc/free */ 382 struct array_cache *array[NR_CPUS]; 383/* 2) Cache tunables. Protected by cache_chain_mutex */ 384 unsigned int batchcount; 385 unsigned int limit; 386 unsigned int shared; 387 388 unsigned int buffer_size; 389/* 3) touched by every alloc & free from the backend */ 390 struct kmem_list3 *nodelists[MAX_NUMNODES]; 391 392 unsigned int flags; /* constant flags */ 393 unsigned int num; /* # of objs per slab */ 394 395/* 4) cache_grow/shrink */ 396 /* order of pgs per slab (2^n) */ 397 unsigned int gfporder; 398 399 /* force GFP flags, e.g. 
GFP_DMA */ 400 gfp_t gfpflags; 401 402 size_t colour; /* cache colouring range */ 403 unsigned int colour_off; /* colour offset */ 404 struct kmem_cache *slabp_cache; 405 unsigned int slab_size; 406 unsigned int dflags; /* dynamic flags */ 407 408 /* constructor func */ 409 void (*ctor) (void *, struct kmem_cache *, unsigned long); 410 411 /* de-constructor func */ 412 void (*dtor) (void *, struct kmem_cache *, unsigned long); 413 414/* 5) cache creation/removal */ 415 const char *name; 416 struct list_head next; 417 418/* 6) statistics */ 419#if STATS 420 unsigned long num_active; 421 unsigned long num_allocations; 422 unsigned long high_mark; 423 unsigned long grown; 424 unsigned long reaped; 425 unsigned long errors; 426 unsigned long max_freeable; 427 unsigned long node_allocs; 428 unsigned long node_frees; 429 unsigned long node_overflow; 430 atomic_t allochit; 431 atomic_t allocmiss; 432 atomic_t freehit; 433 atomic_t freemiss; 434#endif 435#if DEBUG 436 /* 437 * If debugging is enabled, then the allocator can add additional 438 * fields and/or padding to every object. buffer_size contains the total 439 * object size including these internal fields, the following two 440 * variables contain the offset to the user object and its size. 441 */ 442 int obj_offset; 443 int obj_size; 444#endif 445}; 446 447#define CFLGS_OFF_SLAB (0x80000000UL) 448#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 449 450#define BATCHREFILL_LIMIT 16 451/* 452 * Optimization question: fewer reaps means less probability for unnessary 453 * cpucache drain/refill cycles. 454 * 455 * OTOH the cpuarrays can contain lots of objects, 456 * which could lock up otherwise freeable slabs. 457 */ 458#define REAPTIMEOUT_CPUC (2*HZ) 459#define REAPTIMEOUT_LIST3 (4*HZ) 460 461#if STATS 462#define STATS_INC_ACTIVE(x) ((x)->num_active++) 463#define STATS_DEC_ACTIVE(x) ((x)->num_active--) 464#define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 465#define STATS_INC_GROWN(x) ((x)->grown++) 466#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 467#define STATS_SET_HIGH(x) \ 468 do { \ 469 if ((x)->num_active > (x)->high_mark) \ 470 (x)->high_mark = (x)->num_active; \ 471 } while (0) 472#define STATS_INC_ERR(x) ((x)->errors++) 473#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 474#define STATS_INC_NODEFREES(x) ((x)->node_frees++) 475#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 476#define STATS_SET_FREEABLE(x, i) \ 477 do { \ 478 if ((x)->max_freeable < i) \ 479 (x)->max_freeable = i; \ 480 } while (0) 481#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 482#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 483#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 484#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 485#else 486#define STATS_INC_ACTIVE(x) do { } while (0) 487#define STATS_DEC_ACTIVE(x) do { } while (0) 488#define STATS_INC_ALLOCED(x) do { } while (0) 489#define STATS_INC_GROWN(x) do { } while (0) 490#define STATS_ADD_REAPED(x,y) do { } while (0) 491#define STATS_SET_HIGH(x) do { } while (0) 492#define STATS_INC_ERR(x) do { } while (0) 493#define STATS_INC_NODEALLOCS(x) do { } while (0) 494#define STATS_INC_NODEFREES(x) do { } while (0) 495#define STATS_INC_ACOVERFLOW(x) do { } while (0) 496#define STATS_SET_FREEABLE(x, i) do { } while (0) 497#define STATS_INC_ALLOCHIT(x) do { } while (0) 498#define STATS_INC_ALLOCMISS(x) do { } while (0) 499#define STATS_INC_FREEHIT(x) do { } while (0) 500#define STATS_INC_FREEMISS(x) do { } while (0) 501#endif 502 503#if DEBUG 504 505/* 506 * 
memory layout of objects: 507 * 0 : objp 508 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 509 * the end of an object is aligned with the end of the real 510 * allocation. Catches writes behind the end of the allocation. 511 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 512 * redzone word. 513 * cachep->obj_offset: The real object. 514 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 515 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address 516 * [BYTES_PER_WORD long] 517 */ 518static int obj_offset(struct kmem_cache *cachep) 519{ 520 return cachep->obj_offset; 521} 522 523static int obj_size(struct kmem_cache *cachep) 524{ 525 return cachep->obj_size; 526} 527 528static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 529{ 530 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 531 return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD); 532} 533 534static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 535{ 536 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 537 if (cachep->flags & SLAB_STORE_USER) 538 return (unsigned long *)(objp + cachep->buffer_size - 539 2 * BYTES_PER_WORD); 540 return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD); 541} 542 543static void **dbg_userword(struct kmem_cache *cachep, void *objp) 544{ 545 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 546 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); 547} 548 549#else 550 551#define obj_offset(x) 0 552#define obj_size(cachep) (cachep->buffer_size) 553#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 554#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 555#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 556 557#endif 558 559/* 560 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp 561 * order. 562 */ 563#if defined(CONFIG_LARGE_ALLOCS) 564#define MAX_OBJ_ORDER 13 /* up to 32Mb */ 565#define MAX_GFP_ORDER 13 /* up to 32Mb */ 566#elif defined(CONFIG_MMU) 567#define MAX_OBJ_ORDER 5 /* 32 pages */ 568#define MAX_GFP_ORDER 5 /* 32 pages */ 569#else 570#define MAX_OBJ_ORDER 8 /* up to 1Mb */ 571#define MAX_GFP_ORDER 8 /* up to 1Mb */ 572#endif 573 574/* 575 * Do not go above this order unless 0 objects fit into the slab. 576 */ 577#define BREAK_GFP_ORDER_HI 1 578#define BREAK_GFP_ORDER_LO 0 579static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; 580 581/* 582 * Functions for storing/retrieving the cachep and or slab from the page 583 * allocator. These are used to find the slab an obj belongs to. With kfree(), 584 * these are used to find the cache which an obj belongs to. 
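 *
 * Illustrative round trip (simplified): when a slab's pages are set up,
 * something like
 *
 *	page_set_cache(page, cachep);
 *	page_set_slab(page, slabp);
 *
 * is done for every page backing the slab, so that a later kfree(objp)
 * can recover both owners with
 *
 *	cachep = virt_to_cache(objp);
 *	slabp  = virt_to_slab(objp);
 *
 * without any per-object bookkeeping outside of struct page.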
585 */ 586static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 587{ 588 page->lru.next = (struct list_head *)cache; 589} 590 591static inline struct kmem_cache *page_get_cache(struct page *page) 592{ 593 if (unlikely(PageCompound(page))) 594 page = (struct page *)page_private(page); 595 BUG_ON(!PageSlab(page)); 596 return (struct kmem_cache *)page->lru.next; 597} 598 599static inline void page_set_slab(struct page *page, struct slab *slab) 600{ 601 page->lru.prev = (struct list_head *)slab; 602} 603 604static inline struct slab *page_get_slab(struct page *page) 605{ 606 if (unlikely(PageCompound(page))) 607 page = (struct page *)page_private(page); 608 BUG_ON(!PageSlab(page)); 609 return (struct slab *)page->lru.prev; 610} 611 612static inline struct kmem_cache *virt_to_cache(const void *obj) 613{ 614 struct page *page = virt_to_page(obj); 615 return page_get_cache(page); 616} 617 618static inline struct slab *virt_to_slab(const void *obj) 619{ 620 struct page *page = virt_to_page(obj); 621 return page_get_slab(page); 622} 623 624static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, 625 unsigned int idx) 626{ 627 return slab->s_mem + cache->buffer_size * idx; 628} 629 630static inline unsigned int obj_to_index(struct kmem_cache *cache, 631 struct slab *slab, void *obj) 632{ 633 return (unsigned)(obj - slab->s_mem) / cache->buffer_size; 634} 635 636/* 637 * These are the default caches for kmalloc. Custom caches can have other sizes. 638 */ 639struct cache_sizes malloc_sizes[] = { 640#define CACHE(x) { .cs_size = (x) }, 641#include <linux/kmalloc_sizes.h> 642 CACHE(ULONG_MAX) 643#undef CACHE 644}; 645EXPORT_SYMBOL(malloc_sizes); 646 647/* Must match cache_sizes above. Out of line to keep cache footprint low. */ 648struct cache_names { 649 char *name; 650 char *name_dma; 651}; 652 653static struct cache_names __initdata cache_names[] = { 654#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 655#include <linux/kmalloc_sizes.h> 656 {NULL,} 657#undef CACHE 658}; 659 660static struct arraycache_init initarray_cache __initdata = 661 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 662static struct arraycache_init initarray_generic = 663 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 664 665/* internal cache of cache description objs */ 666static struct kmem_cache cache_cache = { 667 .batchcount = 1, 668 .limit = BOOT_CPUCACHE_ENTRIES, 669 .shared = 1, 670 .buffer_size = sizeof(struct kmem_cache), 671 .name = "kmem_cache", 672#if DEBUG 673 .obj_size = sizeof(struct kmem_cache), 674#endif 675}; 676 677#define BAD_ALIEN_MAGIC 0x01020304ul 678 679#ifdef CONFIG_LOCKDEP 680 681/* 682 * Slab sometimes uses the kmalloc slabs to store the slab headers 683 * for other slabs "off slab". 684 * The locking for this is tricky in that it nests within the locks 685 * of all other slabs in a few places; to deal with this special 686 * locking we put on-slab caches into a separate lock-class. 687 * 688 * We set lock class for alien array caches which are up during init. 
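 *
 * For example (an illustrative call chain, slightly simplified), freeing
 * the last object of a cache whose slab headers live off-slab can nest
 * two list_locks:
 *
 *	kmem_cache_free(cachep, objp)
 *	  -> free_block()	with cachep's l3->list_lock held
 *	    -> slab_destroy()
 *	      -> kmem_cache_free(cachep->slabp_cache, slabp)
 *		 which may in turn take the kmalloc cache's l3->list_lock
 *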
689 * The lock annotation will be lost if all cpus of a node goes down and 690 * then comes back up during hotplug 691 */ 692static struct lock_class_key on_slab_l3_key; 693static struct lock_class_key on_slab_alc_key; 694 695static inline void init_lock_keys(void) 696 697{ 698 int q; 699 struct cache_sizes *s = malloc_sizes; 700 701 while (s->cs_size != ULONG_MAX) { 702 for_each_node(q) { 703 struct array_cache **alc; 704 int r; 705 struct kmem_list3 *l3 = s->cs_cachep->nodelists[q]; 706 if (!l3 || OFF_SLAB(s->cs_cachep)) 707 continue; 708 lockdep_set_class(&l3->list_lock, &on_slab_l3_key); 709 alc = l3->alien; 710 /* 711 * FIXME: This check for BAD_ALIEN_MAGIC 712 * should go away when common slab code is taught to 713 * work even without alien caches. 714 * Currently, non NUMA code returns BAD_ALIEN_MAGIC 715 * for alloc_alien_cache, 716 */ 717 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) 718 continue; 719 for_each_node(r) { 720 if (alc[r]) 721 lockdep_set_class(&alc[r]->lock, 722 &on_slab_alc_key); 723 } 724 } 725 s++; 726 } 727} 728#else 729static inline void init_lock_keys(void) 730{ 731} 732#endif 733 734/* 735 * 1. Guard access to the cache-chain. 736 * 2. Protect sanity of cpu_online_map against cpu hotplug events 737 */ 738static DEFINE_MUTEX(cache_chain_mutex); 739static struct list_head cache_chain; 740 741/* 742 * chicken and egg problem: delay the per-cpu array allocation 743 * until the general caches are up. 744 */ 745static enum { 746 NONE, 747 PARTIAL_AC, 748 PARTIAL_L3, 749 FULL 750} g_cpucache_up; 751 752/* 753 * used by boot code to determine if it can use slab based allocator 754 */ 755int slab_is_available(void) 756{ 757 return g_cpucache_up == FULL; 758} 759 760static DEFINE_PER_CPU(struct delayed_work, reap_work); 761 762static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 763{ 764 return cachep->array[smp_processor_id()]; 765} 766 767static inline struct kmem_cache *__find_general_cachep(size_t size, 768 gfp_t gfpflags) 769{ 770 struct cache_sizes *csizep = malloc_sizes; 771 772#if DEBUG 773 /* This happens if someone tries to call 774 * kmem_cache_create(), or __kmalloc(), before 775 * the generic caches are initialized. 776 */ 777 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 778#endif 779 while (size > csizep->cs_size) 780 csizep++; 781 782 /* 783 * Really subtle: The last entry with cs->cs_size==ULONG_MAX 784 * has cs_{dma,}cachep==NULL. Thus no special case 785 * for large kmalloc calls required. 786 */ 787 if (unlikely(gfpflags & GFP_DMA)) 788 return csizep->cs_dmacachep; 789 return csizep->cs_cachep; 790} 791 792static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 793{ 794 return __find_general_cachep(size, gfpflags); 795} 796 797static size_t slab_mgmt_size(size_t nr_objs, size_t align) 798{ 799 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 800} 801 802/* 803 * Calculate the number of objects and left-over bytes for a given buffer size. 804 */ 805static void cache_estimate(unsigned long gfporder, size_t buffer_size, 806 size_t align, int flags, size_t *left_over, 807 unsigned int *num) 808{ 809 int nr_objs; 810 size_t mgmt_size; 811 size_t slab_size = PAGE_SIZE << gfporder; 812 813 /* 814 * The slab management structure can be either off the slab or 815 * on it. 
For the latter case, the memory allocated for a 816 * slab is used for: 817 * 818 * - The struct slab 819 * - One kmem_bufctl_t for each object 820 * - Padding to respect alignment of @align 821 * - @buffer_size bytes for each object 822 * 823 * If the slab management structure is off the slab, then the 824 * alignment will already be calculated into the size. Because 825 * the slabs are all pages aligned, the objects will be at the 826 * correct alignment when allocated. 827 */ 828 if (flags & CFLGS_OFF_SLAB) { 829 mgmt_size = 0; 830 nr_objs = slab_size / buffer_size; 831 832 if (nr_objs > SLAB_LIMIT) 833 nr_objs = SLAB_LIMIT; 834 } else { 835 /* 836 * Ignore padding for the initial guess. The padding 837 * is at most @align-1 bytes, and @buffer_size is at 838 * least @align. In the worst case, this result will 839 * be one greater than the number of objects that fit 840 * into the memory allocation when taking the padding 841 * into account. 842 */ 843 nr_objs = (slab_size - sizeof(struct slab)) / 844 (buffer_size + sizeof(kmem_bufctl_t)); 845 846 /* 847 * This calculated number will be either the right 848 * amount, or one greater than what we want. 849 */ 850 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size 851 > slab_size) 852 nr_objs--; 853 854 if (nr_objs > SLAB_LIMIT) 855 nr_objs = SLAB_LIMIT; 856 857 mgmt_size = slab_mgmt_size(nr_objs, align); 858 } 859 *num = nr_objs; 860 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 861} 862 863#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) 864 865static void __slab_error(const char *function, struct kmem_cache *cachep, 866 char *msg) 867{ 868 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 869 function, cachep->name, msg); 870 dump_stack(); 871} 872 873/* 874 * By default on NUMA we use alien caches to stage the freeing of 875 * objects allocated from other nodes. This causes massive memory 876 * inefficiencies when using fake NUMA setup to split memory into a 877 * large number of small nodes, so it can be disabled on the command 878 * line 879 */ 880 881static int use_alien_caches __read_mostly = 1; 882static int __init noaliencache_setup(char *s) 883{ 884 use_alien_caches = 0; 885 return 1; 886} 887__setup("noaliencache", noaliencache_setup); 888 889#ifdef CONFIG_NUMA 890/* 891 * Special reaping functions for NUMA systems called from cache_reap(). 892 * These take care of doing round robin flushing of alien caches (containing 893 * objects freed on different nodes from which they were allocated) and the 894 * flushing of remote pcps by calling drain_node_pages. 895 */ 896static DEFINE_PER_CPU(unsigned long, reap_node); 897 898static void init_reap_node(int cpu) 899{ 900 int node; 901 902 node = next_node(cpu_to_node(cpu), node_online_map); 903 if (node == MAX_NUMNODES) 904 node = first_node(node_online_map); 905 906 per_cpu(reap_node, cpu) = node; 907} 908 909static void next_reap_node(void) 910{ 911 int node = __get_cpu_var(reap_node); 912 913 /* 914 * Also drain per cpu pages on remote zones 915 */ 916 if (node != numa_node_id()) 917 drain_node_pages(node); 918 919 node = next_node(node, node_online_map); 920 if (unlikely(node >= MAX_NUMNODES)) 921 node = first_node(node_online_map); 922 __get_cpu_var(reap_node) = node; 923} 924 925#else 926#define init_reap_node(cpu) do { } while (0) 927#define next_reap_node(void) do { } while (0) 928#endif 929 930/* 931 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 932 * via the workqueue/eventd. 
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be set up
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct
array_cache *ac, int node) 1071{ 1072 struct kmem_list3 *rl3 = cachep->nodelists[node]; 1073 1074 if (ac->avail) { 1075 spin_lock(&rl3->list_lock); 1076 /* 1077 * Stuff objects into the remote nodes shared array first. 1078 * That way we could avoid the overhead of putting the objects 1079 * into the free lists and getting them back later. 1080 */ 1081 if (rl3->shared) 1082 transfer_objects(rl3->shared, ac, ac->limit); 1083 1084 free_block(cachep, ac->entry, ac->avail, node); 1085 ac->avail = 0; 1086 spin_unlock(&rl3->list_lock); 1087 } 1088} 1089 1090/* 1091 * Called from cache_reap() to regularly drain alien caches round robin. 1092 */ 1093static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 1094{ 1095 int node = __get_cpu_var(reap_node); 1096 1097 if (l3->alien) { 1098 struct array_cache *ac = l3->alien[node]; 1099 1100 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { 1101 __drain_alien_cache(cachep, ac, node); 1102 spin_unlock_irq(&ac->lock); 1103 } 1104 } 1105} 1106 1107static void drain_alien_cache(struct kmem_cache *cachep, 1108 struct array_cache **alien) 1109{ 1110 int i = 0; 1111 struct array_cache *ac; 1112 unsigned long flags; 1113 1114 for_each_online_node(i) { 1115 ac = alien[i]; 1116 if (ac) { 1117 spin_lock_irqsave(&ac->lock, flags); 1118 __drain_alien_cache(cachep, ac, i); 1119 spin_unlock_irqrestore(&ac->lock, flags); 1120 } 1121 } 1122} 1123 1124static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1125{ 1126 struct slab *slabp = virt_to_slab(objp); 1127 int nodeid = slabp->nodeid; 1128 struct kmem_list3 *l3; 1129 struct array_cache *alien = NULL; 1130 int node; 1131 1132 node = numa_node_id(); 1133 1134 /* 1135 * Make sure we are not freeing a object from another node to the array 1136 * cache on this cpu. 1137 */ 1138 if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches)) 1139 return 0; 1140 1141 l3 = cachep->nodelists[node]; 1142 STATS_INC_NODEFREES(cachep); 1143 if (l3->alien && l3->alien[nodeid]) { 1144 alien = l3->alien[nodeid]; 1145 spin_lock(&alien->lock); 1146 if (unlikely(alien->avail == alien->limit)) { 1147 STATS_INC_ACOVERFLOW(cachep); 1148 __drain_alien_cache(cachep, alien, nodeid); 1149 } 1150 alien->entry[alien->avail++] = objp; 1151 spin_unlock(&alien->lock); 1152 } else { 1153 spin_lock(&(cachep->nodelists[nodeid])->list_lock); 1154 free_block(cachep, &objp, 1, nodeid); 1155 spin_unlock(&(cachep->nodelists[nodeid])->list_lock); 1156 } 1157 return 1; 1158} 1159#endif 1160 1161static int __cpuinit cpuup_callback(struct notifier_block *nfb, 1162 unsigned long action, void *hcpu) 1163{ 1164 long cpu = (long)hcpu; 1165 struct kmem_cache *cachep; 1166 struct kmem_list3 *l3 = NULL; 1167 int node = cpu_to_node(cpu); 1168 int memsize = sizeof(struct kmem_list3); 1169 1170 switch (action) { 1171 case CPU_UP_PREPARE: 1172 mutex_lock(&cache_chain_mutex); 1173 /* 1174 * We need to do this right in the beginning since 1175 * alloc_arraycache's are going to use this list. 1176 * kmalloc_node allows us to add the slab to the right 1177 * kmem_list3 and not this cpu's kmem_list3 1178 */ 1179 1180 list_for_each_entry(cachep, &cache_chain, next) { 1181 /* 1182 * Set up the size64 kmemlist for cpu before we can 1183 * begin anything. 
Make sure some other cpu on this 1184 * node has not already allocated this 1185 */ 1186 if (!cachep->nodelists[node]) { 1187 l3 = kmalloc_node(memsize, GFP_KERNEL, node); 1188 if (!l3) 1189 goto bad; 1190 kmem_list3_init(l3); 1191 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1192 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1193 1194 /* 1195 * The l3s don't come and go as CPUs come and 1196 * go. cache_chain_mutex is sufficient 1197 * protection here. 1198 */ 1199 cachep->nodelists[node] = l3; 1200 } 1201 1202 spin_lock_irq(&cachep->nodelists[node]->list_lock); 1203 cachep->nodelists[node]->free_limit = 1204 (1 + nr_cpus_node(node)) * 1205 cachep->batchcount + cachep->num; 1206 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 1207 } 1208 1209 /* 1210 * Now we can go ahead with allocating the shared arrays and 1211 * array caches 1212 */ 1213 list_for_each_entry(cachep, &cache_chain, next) { 1214 struct array_cache *nc; 1215 struct array_cache *shared; 1216 struct array_cache **alien = NULL; 1217 1218 nc = alloc_arraycache(node, cachep->limit, 1219 cachep->batchcount); 1220 if (!nc) 1221 goto bad; 1222 shared = alloc_arraycache(node, 1223 cachep->shared * cachep->batchcount, 1224 0xbaadf00d); 1225 if (!shared) 1226 goto bad; 1227 1228 if (use_alien_caches) { 1229 alien = alloc_alien_cache(node, cachep->limit); 1230 if (!alien) 1231 goto bad; 1232 } 1233 cachep->array[cpu] = nc; 1234 l3 = cachep->nodelists[node]; 1235 BUG_ON(!l3); 1236 1237 spin_lock_irq(&l3->list_lock); 1238 if (!l3->shared) { 1239 /* 1240 * We are serialised from CPU_DEAD or 1241 * CPU_UP_CANCELLED by the cpucontrol lock 1242 */ 1243 l3->shared = shared; 1244 shared = NULL; 1245 } 1246#ifdef CONFIG_NUMA 1247 if (!l3->alien) { 1248 l3->alien = alien; 1249 alien = NULL; 1250 } 1251#endif 1252 spin_unlock_irq(&l3->list_lock); 1253 kfree(shared); 1254 free_alien_cache(alien); 1255 } 1256 break; 1257 case CPU_ONLINE: 1258 mutex_unlock(&cache_chain_mutex); 1259 start_cpu_timer(cpu); 1260 break; 1261#ifdef CONFIG_HOTPLUG_CPU 1262 case CPU_DOWN_PREPARE: 1263 mutex_lock(&cache_chain_mutex); 1264 break; 1265 case CPU_DOWN_FAILED: 1266 mutex_unlock(&cache_chain_mutex); 1267 break; 1268 case CPU_DEAD: 1269 /* 1270 * Even if all the cpus of a node are down, we don't free the 1271 * kmem_list3 of any cache. This to avoid a race between 1272 * cpu_down, and a kmalloc allocation from another cpu for 1273 * memory from the node of the cpu going down. The list3 1274 * structure is usually allocated from kmem_cache_create() and 1275 * gets destroyed at kmem_cache_destroy(). 1276 */ 1277 /* fall thru */ 1278#endif 1279 case CPU_UP_CANCELED: 1280 list_for_each_entry(cachep, &cache_chain, next) { 1281 struct array_cache *nc; 1282 struct array_cache *shared; 1283 struct array_cache **alien; 1284 cpumask_t mask; 1285 1286 mask = node_to_cpumask(node); 1287 /* cpu is dead; no one can alloc from it. 
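 * Below we drain its array_cache back into the node lists and, if this
 * was the last online cpu on the node, also release the node's shared
 * and alien caches.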
*/ 1288 nc = cachep->array[cpu]; 1289 cachep->array[cpu] = NULL; 1290 l3 = cachep->nodelists[node]; 1291 1292 if (!l3) 1293 goto free_array_cache; 1294 1295 spin_lock_irq(&l3->list_lock); 1296 1297 /* Free limit for this kmem_list3 */ 1298 l3->free_limit -= cachep->batchcount; 1299 if (nc) 1300 free_block(cachep, nc->entry, nc->avail, node); 1301 1302 if (!cpus_empty(mask)) { 1303 spin_unlock_irq(&l3->list_lock); 1304 goto free_array_cache; 1305 } 1306 1307 shared = l3->shared; 1308 if (shared) { 1309 free_block(cachep, l3->shared->entry, 1310 l3->shared->avail, node); 1311 l3->shared = NULL; 1312 } 1313 1314 alien = l3->alien; 1315 l3->alien = NULL; 1316 1317 spin_unlock_irq(&l3->list_lock); 1318 1319 kfree(shared); 1320 if (alien) { 1321 drain_alien_cache(cachep, alien); 1322 free_alien_cache(alien); 1323 } 1324free_array_cache: 1325 kfree(nc); 1326 } 1327 /* 1328 * In the previous loop, all the objects were freed to 1329 * the respective cache's slabs, now we can go ahead and 1330 * shrink each nodelist to its limit. 1331 */ 1332 list_for_each_entry(cachep, &cache_chain, next) { 1333 l3 = cachep->nodelists[node]; 1334 if (!l3) 1335 continue; 1336 drain_freelist(cachep, l3, l3->free_objects); 1337 } 1338 mutex_unlock(&cache_chain_mutex); 1339 break; 1340 } 1341 return NOTIFY_OK; 1342bad: 1343 return NOTIFY_BAD; 1344} 1345 1346static struct notifier_block __cpuinitdata cpucache_notifier = { 1347 &cpuup_callback, NULL, 0 1348}; 1349 1350/* 1351 * swap the static kmem_list3 with kmalloced memory 1352 */ 1353static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, 1354 int nodeid) 1355{ 1356 struct kmem_list3 *ptr; 1357 1358 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); 1359 BUG_ON(!ptr); 1360 1361 local_irq_disable(); 1362 memcpy(ptr, list, sizeof(struct kmem_list3)); 1363 /* 1364 * Do not assume that spinlocks can be initialized via memcpy: 1365 */ 1366 spin_lock_init(&ptr->list_lock); 1367 1368 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1369 cachep->nodelists[nodeid] = ptr; 1370 local_irq_enable(); 1371} 1372 1373/* 1374 * Initialisation. Called after the page allocator have been initialised and 1375 * before smp_init(). 1376 */ 1377void __init kmem_cache_init(void) 1378{ 1379 size_t left_over; 1380 struct cache_sizes *sizes; 1381 struct cache_names *names; 1382 int i; 1383 int order; 1384 int node; 1385 1386 for (i = 0; i < NUM_INIT_LISTS; i++) { 1387 kmem_list3_init(&initkmem_list3[i]); 1388 if (i < MAX_NUMNODES) 1389 cache_cache.nodelists[i] = NULL; 1390 } 1391 1392 /* 1393 * Fragmentation resistance on low memory - only use bigger 1394 * page orders on machines with more than 32MB of memory. 1395 */ 1396 if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1397 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1398 1399 /* Bootstrap is tricky, because several objects are allocated 1400 * from caches that do not exist yet: 1401 * 1) initialize the cache_cache cache: it contains the struct 1402 * kmem_cache structures of all caches, except cache_cache itself: 1403 * cache_cache is statically allocated. 1404 * Initially an __init data area is used for the head array and the 1405 * kmem_list3 structures, it's replaced with a kmalloc allocated 1406 * array at the end of the bootstrap. 1407 * 2) Create the first kmalloc cache. 1408 * The struct kmem_cache for the new cache is allocated normally. 1409 * An __init data area is used for the head array. 1410 * 3) Create the remaining kmalloc caches, with minimally sized 1411 * head arrays. 
1412 * 4) Replace the __init data head arrays for cache_cache and the first 1413 * kmalloc cache with kmalloc allocated arrays. 1414 * 5) Replace the __init data for kmem_list3 for cache_cache and 1415 * the other cache's with kmalloc allocated memory. 1416 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1417 */ 1418 1419 node = numa_node_id(); 1420 1421 /* 1) create the cache_cache */ 1422 INIT_LIST_HEAD(&cache_chain); 1423 list_add(&cache_cache.next, &cache_chain); 1424 cache_cache.colour_off = cache_line_size(); 1425 cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1426 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE]; 1427 1428 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1429 cache_line_size()); 1430 1431 for (order = 0; order < MAX_ORDER; order++) { 1432 cache_estimate(order, cache_cache.buffer_size, 1433 cache_line_size(), 0, &left_over, &cache_cache.num); 1434 if (cache_cache.num) 1435 break; 1436 } 1437 BUG_ON(!cache_cache.num); 1438 cache_cache.gfporder = order; 1439 cache_cache.colour = left_over / cache_cache.colour_off; 1440 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1441 sizeof(struct slab), cache_line_size()); 1442 1443 /* 2+3) create the kmalloc caches */ 1444 sizes = malloc_sizes; 1445 names = cache_names; 1446 1447 /* 1448 * Initialize the caches that provide memory for the array cache and the 1449 * kmem_list3 structures first. Without this, further allocations will 1450 * bug. 1451 */ 1452 1453 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, 1454 sizes[INDEX_AC].cs_size, 1455 ARCH_KMALLOC_MINALIGN, 1456 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1457 NULL, NULL); 1458 1459 if (INDEX_AC != INDEX_L3) { 1460 sizes[INDEX_L3].cs_cachep = 1461 kmem_cache_create(names[INDEX_L3].name, 1462 sizes[INDEX_L3].cs_size, 1463 ARCH_KMALLOC_MINALIGN, 1464 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1465 NULL, NULL); 1466 } 1467 1468 slab_early_init = 0; 1469 1470 while (sizes->cs_size != ULONG_MAX) { 1471 /* 1472 * For performance, all the general caches are L1 aligned. 1473 * This should be particularly beneficial on SMP boxes, as it 1474 * eliminates "false sharing". 1475 * Note for systems short on memory removing the alignment will 1476 * allow tighter packing of the smaller caches. 
1477 */ 1478 if (!sizes->cs_cachep) { 1479 sizes->cs_cachep = kmem_cache_create(names->name, 1480 sizes->cs_size, 1481 ARCH_KMALLOC_MINALIGN, 1482 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1483 NULL, NULL); 1484 } 1485 1486 sizes->cs_dmacachep = kmem_cache_create(names->name_dma, 1487 sizes->cs_size, 1488 ARCH_KMALLOC_MINALIGN, 1489 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1490 SLAB_PANIC, 1491 NULL, NULL); 1492 sizes++; 1493 names++; 1494 } 1495 /* 4) Replace the bootstrap head arrays */ 1496 { 1497 struct array_cache *ptr; 1498 1499 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1500 1501 local_irq_disable(); 1502 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1503 memcpy(ptr, cpu_cache_get(&cache_cache), 1504 sizeof(struct arraycache_init)); 1505 /* 1506 * Do not assume that spinlocks can be initialized via memcpy: 1507 */ 1508 spin_lock_init(&ptr->lock); 1509 1510 cache_cache.array[smp_processor_id()] = ptr; 1511 local_irq_enable(); 1512 1513 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1514 1515 local_irq_disable(); 1516 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1517 != &initarray_generic.cache); 1518 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1519 sizeof(struct arraycache_init)); 1520 /* 1521 * Do not assume that spinlocks can be initialized via memcpy: 1522 */ 1523 spin_lock_init(&ptr->lock); 1524 1525 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1526 ptr; 1527 local_irq_enable(); 1528 } 1529 /* 5) Replace the bootstrap kmem_list3's */ 1530 { 1531 int nid; 1532 1533 /* Replace the static kmem_list3 structures for the boot cpu */ 1534 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node); 1535 1536 for_each_online_node(nid) { 1537 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1538 &initkmem_list3[SIZE_AC + nid], nid); 1539 1540 if (INDEX_AC != INDEX_L3) { 1541 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1542 &initkmem_list3[SIZE_L3 + nid], nid); 1543 } 1544 } 1545 } 1546 1547 /* 6) resize the head arrays to their final sizes */ 1548 { 1549 struct kmem_cache *cachep; 1550 mutex_lock(&cache_chain_mutex); 1551 list_for_each_entry(cachep, &cache_chain, next) 1552 if (enable_cpucache(cachep)) 1553 BUG(); 1554 mutex_unlock(&cache_chain_mutex); 1555 } 1556 1557 /* Annotate slab for lockdep -- annotate the malloc caches */ 1558 init_lock_keys(); 1559 1560 1561 /* Done! */ 1562 g_cpucache_up = FULL; 1563 1564 /* 1565 * Register a cpu startup notifier callback that initializes 1566 * cpu_cache_get for all new cpus 1567 */ 1568 register_cpu_notifier(&cpucache_notifier); 1569 1570 /* 1571 * The reap timers are started later, with a module init call: That part 1572 * of the kernel is not yet operational. 1573 */ 1574} 1575 1576static int __init cpucache_init(void) 1577{ 1578 int cpu; 1579 1580 /* 1581 * Register the timers that return unneeded pages to the page allocator 1582 */ 1583 for_each_online_cpu(cpu) 1584 start_cpu_timer(cpu); 1585 return 0; 1586} 1587__initcall(cpucache_init); 1588 1589/* 1590 * Interface to system's page allocator. No need to hold the cache-lock. 1591 * 1592 * If we requested dmaable memory, we will get it. Even if we 1593 * did not request dmaable memory, we might get it, but that 1594 * would be relatively rare and ignorable. 
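 *
 * Pages handed out here are marked PageSlab and accounted per zone as
 * NR_SLAB_RECLAIMABLE or NR_SLAB_UNRECLAIMABLE, depending on whether the
 * cache was created with SLAB_RECLAIM_ACCOUNT; kmem_freepages() undoes
 * both when the slab is released.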
1595 */ 1596static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) 1597{ 1598 struct page *page; 1599 int nr_pages; 1600 int i; 1601 1602#ifndef CONFIG_MMU 1603 /* 1604 * Nommu uses slab's for process anonymous memory allocations, and thus 1605 * requires __GFP_COMP to properly refcount higher order allocations 1606 */ 1607 flags |= __GFP_COMP; 1608#endif 1609 1610 flags |= cachep->gfpflags; 1611 1612 page = alloc_pages_node(nodeid, flags, cachep->gfporder); 1613 if (!page) 1614 return NULL; 1615 1616 nr_pages = (1 << cachep->gfporder); 1617 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1618 add_zone_page_state(page_zone(page), 1619 NR_SLAB_RECLAIMABLE, nr_pages); 1620 else 1621 add_zone_page_state(page_zone(page), 1622 NR_SLAB_UNRECLAIMABLE, nr_pages); 1623 for (i = 0; i < nr_pages; i++) 1624 __SetPageSlab(page + i); 1625 return page_address(page); 1626} 1627 1628/* 1629 * Interface to system's page release. 1630 */ 1631static void kmem_freepages(struct kmem_cache *cachep, void *addr) 1632{ 1633 unsigned long i = (1 << cachep->gfporder); 1634 struct page *page = virt_to_page(addr); 1635 const unsigned long nr_freed = i; 1636 1637 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1638 sub_zone_page_state(page_zone(page), 1639 NR_SLAB_RECLAIMABLE, nr_freed); 1640 else 1641 sub_zone_page_state(page_zone(page), 1642 NR_SLAB_UNRECLAIMABLE, nr_freed); 1643 while (i--) { 1644 BUG_ON(!PageSlab(page)); 1645 __ClearPageSlab(page); 1646 page++; 1647 } 1648 if (current->reclaim_state) 1649 current->reclaim_state->reclaimed_slab += nr_freed; 1650 free_pages((unsigned long)addr, cachep->gfporder); 1651} 1652 1653static void kmem_rcu_free(struct rcu_head *head) 1654{ 1655 struct slab_rcu *slab_rcu = (struct slab_rcu *)head; 1656 struct kmem_cache *cachep = slab_rcu->cachep; 1657 1658 kmem_freepages(cachep, slab_rcu->addr); 1659 if (OFF_SLAB(cachep)) 1660 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1661} 1662 1663#if DEBUG 1664 1665#ifdef CONFIG_DEBUG_PAGEALLOC 1666static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1667 unsigned long caller) 1668{ 1669 int size = obj_size(cachep); 1670 1671 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1672 1673 if (size < 5 * sizeof(unsigned long)) 1674 return; 1675 1676 *addr++ = 0x12345678; 1677 *addr++ = caller; 1678 *addr++ = smp_processor_id(); 1679 size -= 3 * sizeof(unsigned long); 1680 { 1681 unsigned long *sptr = &caller; 1682 unsigned long svalue; 1683 1684 while (!kstack_end(sptr)) { 1685 svalue = *sptr++; 1686 if (kernel_text_address(svalue)) { 1687 *addr++ = svalue; 1688 size -= sizeof(unsigned long); 1689 if (size <= sizeof(unsigned long)) 1690 break; 1691 } 1692 } 1693 1694 } 1695 *addr++ = 0x87654321; 1696} 1697#endif 1698 1699static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1700{ 1701 int size = obj_size(cachep); 1702 addr = &((char *)addr)[obj_offset(cachep)]; 1703 1704 memset(addr, val, size); 1705 *(unsigned char *)(addr + size - 1) = POISON_END; 1706} 1707 1708static void dump_line(char *data, int offset, int limit) 1709{ 1710 int i; 1711 unsigned char error = 0; 1712 int bad_count = 0; 1713 1714 printk(KERN_ERR "%03x:", offset); 1715 for (i = 0; i < limit; i++) { 1716 if (data[offset + i] != POISON_FREE) { 1717 error = data[offset + i]; 1718 bad_count++; 1719 } 1720 printk(" %02x", (unsigned char)data[offset + i]); 1721 } 1722 printk("\n"); 1723 1724 if (bad_count == 1) { 1725 error ^= POISON_FREE; 1726 if (!(error & (error - 1))) { 1727 printk(KERN_ERR "Single 
bit error detected. Probably " 1728 "bad RAM.\n"); 1729#ifdef CONFIG_X86 1730 printk(KERN_ERR "Run memtest86+ or a similar memory " 1731 "test tool.\n"); 1732#else 1733 printk(KERN_ERR "Run a memory test tool.\n"); 1734#endif 1735 } 1736 } 1737} 1738#endif 1739 1740#if DEBUG 1741 1742static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1743{ 1744 int i, size; 1745 char *realobj; 1746 1747 if (cachep->flags & SLAB_RED_ZONE) { 1748 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", 1749 *dbg_redzone1(cachep, objp), 1750 *dbg_redzone2(cachep, objp)); 1751 } 1752 1753 if (cachep->flags & SLAB_STORE_USER) { 1754 printk(KERN_ERR "Last user: [<%p>]", 1755 *dbg_userword(cachep, objp)); 1756 print_symbol("(%s)", 1757 (unsigned long)*dbg_userword(cachep, objp)); 1758 printk("\n"); 1759 } 1760 realobj = (char *)objp + obj_offset(cachep); 1761 size = obj_size(cachep); 1762 for (i = 0; i < size && lines; i += 16, lines--) { 1763 int limit; 1764 limit = 16; 1765 if (i + limit > size) 1766 limit = size - i; 1767 dump_line(realobj, i, limit); 1768 } 1769} 1770 1771static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1772{ 1773 char *realobj; 1774 int size, i; 1775 int lines = 0; 1776 1777 realobj = (char *)objp + obj_offset(cachep); 1778 size = obj_size(cachep); 1779 1780 for (i = 0; i < size; i++) { 1781 char exp = POISON_FREE; 1782 if (i == size - 1) 1783 exp = POISON_END; 1784 if (realobj[i] != exp) { 1785 int limit; 1786 /* Mismatch ! */ 1787 /* Print header */ 1788 if (lines == 0) { 1789 printk(KERN_ERR 1790 "Slab corruption: start=%p, len=%d\n", 1791 realobj, size); 1792 print_objinfo(cachep, objp, 0); 1793 } 1794 /* Hexdump the affected line */ 1795 i = (i / 16) * 16; 1796 limit = 16; 1797 if (i + limit > size) 1798 limit = size - i; 1799 dump_line(realobj, i, limit); 1800 i += 16; 1801 lines++; 1802 /* Limit to 5 lines */ 1803 if (lines > 5) 1804 break; 1805 } 1806 } 1807 if (lines != 0) { 1808 /* Print some data about the neighboring objects, if they 1809 * exist: 1810 */ 1811 struct slab *slabp = virt_to_slab(objp); 1812 unsigned int objnr; 1813 1814 objnr = obj_to_index(cachep, slabp, objp); 1815 if (objnr) { 1816 objp = index_to_obj(cachep, slabp, objnr - 1); 1817 realobj = (char *)objp + obj_offset(cachep); 1818 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1819 realobj, size); 1820 print_objinfo(cachep, objp, 2); 1821 } 1822 if (objnr + 1 < cachep->num) { 1823 objp = index_to_obj(cachep, slabp, objnr + 1); 1824 realobj = (char *)objp + obj_offset(cachep); 1825 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1826 realobj, size); 1827 print_objinfo(cachep, objp, 2); 1828 } 1829 } 1830} 1831#endif 1832 1833#if DEBUG 1834/** 1835 * slab_destroy_objs - destroy a slab and its objects 1836 * @cachep: cache pointer being destroyed 1837 * @slabp: slab pointer being destroyed 1838 * 1839 * Call the registered destructor for each object in a slab that is being 1840 * destroyed. 
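 *
 * When poisoning is enabled the poison pattern is checked instead (or the
 * pages are re-mapped under CONFIG_DEBUG_PAGEALLOC), and the destructor
 * is only invoked for caches without SLAB_POISON.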
1841 */ 1842static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1843{ 1844 int i; 1845 for (i = 0; i < cachep->num; i++) { 1846 void *objp = index_to_obj(cachep, slabp, i); 1847 1848 if (cachep->flags & SLAB_POISON) { 1849#ifdef CONFIG_DEBUG_PAGEALLOC 1850 if (cachep->buffer_size % PAGE_SIZE == 0 && 1851 OFF_SLAB(cachep)) 1852 kernel_map_pages(virt_to_page(objp), 1853 cachep->buffer_size / PAGE_SIZE, 1); 1854 else 1855 check_poison_obj(cachep, objp); 1856#else 1857 check_poison_obj(cachep, objp); 1858#endif 1859 } 1860 if (cachep->flags & SLAB_RED_ZONE) { 1861 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1862 slab_error(cachep, "start of a freed object " 1863 "was overwritten"); 1864 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1865 slab_error(cachep, "end of a freed object " 1866 "was overwritten"); 1867 } 1868 if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1869 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); 1870 } 1871} 1872#else 1873static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1874{ 1875 if (cachep->dtor) { 1876 int i; 1877 for (i = 0; i < cachep->num; i++) { 1878 void *objp = index_to_obj(cachep, slabp, i); 1879 (cachep->dtor) (objp, cachep, 0); 1880 } 1881 } 1882} 1883#endif 1884 1885/** 1886 * slab_destroy - destroy and release all objects in a slab 1887 * @cachep: cache pointer being destroyed 1888 * @slabp: slab pointer being destroyed 1889 * 1890 * Destroy all the objs in a slab, and release the mem back to the system. 1891 * Before calling the slab must have been unlinked from the cache. The 1892 * cache-lock is not held/needed. 1893 */ 1894static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) 1895{ 1896 void *addr = slabp->s_mem - slabp->colouroff; 1897 1898 slab_destroy_objs(cachep, slabp); 1899 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1900 struct slab_rcu *slab_rcu; 1901 1902 slab_rcu = (struct slab_rcu *)slabp; 1903 slab_rcu->cachep = cachep; 1904 slab_rcu->addr = addr; 1905 call_rcu(&slab_rcu->head, kmem_rcu_free); 1906 } else { 1907 kmem_freepages(cachep, addr); 1908 if (OFF_SLAB(cachep)) 1909 kmem_cache_free(cachep->slabp_cache, slabp); 1910 } 1911} 1912 1913/* 1914 * For setting up all the kmem_list3s for cache whose buffer_size is same as 1915 * size of kmem_list3. 1916 */ 1917static void set_up_list3s(struct kmem_cache *cachep, int index) 1918{ 1919 int node; 1920 1921 for_each_online_node(node) { 1922 cachep->nodelists[node] = &initkmem_list3[index + node]; 1923 cachep->nodelists[node]->next_reap = jiffies + 1924 REAPTIMEOUT_LIST3 + 1925 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1926 } 1927} 1928 1929static void __kmem_cache_destroy(struct kmem_cache *cachep) 1930{ 1931 int i; 1932 struct kmem_list3 *l3; 1933 1934 for_each_online_cpu(i) 1935 kfree(cachep->array[i]); 1936 1937 /* NUMA: free the list3 structures */ 1938 for_each_online_node(i) { 1939 l3 = cachep->nodelists[i]; 1940 if (l3) { 1941 kfree(l3->shared); 1942 free_alien_cache(l3->alien); 1943 kfree(l3); 1944 } 1945 } 1946 kmem_cache_free(&cache_cache, cachep); 1947} 1948 1949 1950/** 1951 * calculate_slab_order - calculate size (page order) of slabs 1952 * @cachep: pointer to the cache that is being created 1953 * @size: size of objects to be created in this cache. 1954 * @align: required alignment for the objects. 1955 * @flags: slab allocation flags 1956 * 1957 * Also calculates the number of objects per slab. 1958 * 1959 * This could be made much more intelligent. 
For now, try to avoid using 1960 * high order pages for slabs. When the gfp() functions are more friendly 1961 * towards high-order requests, this should be changed. 1962 */ 1963static size_t calculate_slab_order(struct kmem_cache *cachep, 1964 size_t size, size_t align, unsigned long flags) 1965{ 1966 unsigned long offslab_limit; 1967 size_t left_over = 0; 1968 int gfporder; 1969 1970 for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { 1971 unsigned int num; 1972 size_t remainder; 1973 1974 cache_estimate(gfporder, size, align, flags, &remainder, &num); 1975 if (!num) 1976 continue; 1977 1978 if (flags & CFLGS_OFF_SLAB) { 1979 /* 1980 * Max number of objs-per-slab for caches which 1981 * use off-slab slabs. Needed to avoid a possible 1982 * looping condition in cache_grow(). 1983 */ 1984 offslab_limit = size - sizeof(struct slab); 1985 offslab_limit /= sizeof(kmem_bufctl_t); 1986 1987 if (num > offslab_limit) 1988 break; 1989 } 1990 1991 /* Found something acceptable - save it away */ 1992 cachep->num = num; 1993 cachep->gfporder = gfporder; 1994 left_over = remainder; 1995 1996 /* 1997 * A VFS-reclaimable slab tends to have most allocations 1998 * as GFP_NOFS and we really don't want to have to be allocating 1999 * higher-order pages when we are unable to shrink dcache. 2000 */ 2001 if (flags & SLAB_RECLAIM_ACCOUNT) 2002 break; 2003 2004 /* 2005 * Large number of objects is good, but very large slabs are 2006 * currently bad for the gfp()s. 2007 */ 2008 if (gfporder >= slab_break_gfp_order) 2009 break; 2010 2011 /* 2012 * Acceptable internal fragmentation? 2013 */ 2014 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 2015 break; 2016 } 2017 return left_over; 2018} 2019 2020static int setup_cpu_cache(struct kmem_cache *cachep) 2021{ 2022 if (g_cpucache_up == FULL) 2023 return enable_cpucache(cachep); 2024 2025 if (g_cpucache_up == NONE) { 2026 /* 2027 * Note: the first kmem_cache_create must create the cache 2028 * that's used by kmalloc(24), otherwise the creation of 2029 * further caches will BUG(). 2030 */ 2031 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2032 2033 /* 2034 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 2035 * the first cache, then we need to set up all its list3s, 2036 * otherwise the creation of further caches will BUG(). 2037 */ 2038 set_up_list3s(cachep, SIZE_AC); 2039 if (INDEX_AC == INDEX_L3) 2040 g_cpucache_up = PARTIAL_L3; 2041 else 2042 g_cpucache_up = PARTIAL_AC; 2043 } else { 2044 cachep->array[smp_processor_id()] = 2045 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 2046 2047 if (g_cpucache_up == PARTIAL_AC) { 2048 set_up_list3s(cachep, SIZE_L3); 2049 g_cpucache_up = PARTIAL_L3; 2050 } else { 2051 int node; 2052 for_each_online_node(node) { 2053 cachep->nodelists[node] = 2054 kmalloc_node(sizeof(struct kmem_list3), 2055 GFP_KERNEL, node); 2056 BUG_ON(!cachep->nodelists[node]); 2057 kmem_list3_init(cachep->nodelists[node]); 2058 } 2059 } 2060 } 2061 cachep->nodelists[numa_node_id()]->next_reap = 2062 jiffies + REAPTIMEOUT_LIST3 + 2063 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2064 2065 cpu_cache_get(cachep)->avail = 0; 2066 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2067 cpu_cache_get(cachep)->batchcount = 1; 2068 cpu_cache_get(cachep)->touched = 0; 2069 cachep->batchcount = 1; 2070 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2071 return 0; 2072} 2073 2074/** 2075 * kmem_cache_create - Create a cache. 2076 * @name: A string which is used in /proc/slabinfo to identify this cache. 
2077 * @size: The size of objects to be created in this cache. 2078 * @align: The required alignment for the objects. 2079 * @flags: SLAB flags 2080 * @ctor: A constructor for the objects. 2081 * @dtor: A destructor for the objects. 2082 * 2083 * Returns a ptr to the cache on success, NULL on failure. 2084 * Cannot be called within a int, but can be interrupted. 2085 * The @ctor is run when new pages are allocated by the cache 2086 * and the @dtor is run before the pages are handed back. 2087 * 2088 * @name must be valid until the cache is destroyed. This implies that 2089 * the module calling this has to destroy the cache before getting unloaded. 2090 * 2091 * The flags are 2092 * 2093 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2094 * to catch references to uninitialised memory. 2095 * 2096 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2097 * for buffer overruns. 2098 * 2099 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2100 * cacheline. This can be beneficial if you're counting cycles as closely 2101 * as davem. 2102 */ 2103struct kmem_cache * 2104kmem_cache_create (const char *name, size_t size, size_t align, 2105 unsigned long flags, 2106 void (*ctor)(void*, struct kmem_cache *, unsigned long), 2107 void (*dtor)(void*, struct kmem_cache *, unsigned long)) 2108{ 2109 size_t left_over, slab_size, ralign; 2110 struct kmem_cache *cachep = NULL, *pc; 2111 2112 /* 2113 * Sanity checks... these are all serious usage bugs. 2114 */ 2115 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2116 (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { 2117 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2118 name); 2119 BUG(); 2120 } 2121 2122 /* 2123 * We use cache_chain_mutex to ensure a consistent view of 2124 * cpu_online_map as well. Please see cpuup_callback 2125 */ 2126 mutex_lock(&cache_chain_mutex); 2127 2128 list_for_each_entry(pc, &cache_chain, next) { 2129 char tmp; 2130 int res; 2131 2132 /* 2133 * This happens when the module gets unloaded and doesn't 2134 * destroy its slab cache and no-one else reuses the vmalloc 2135 * area of the module. Print a warning. 2136 */ 2137 res = probe_kernel_address(pc->name, tmp); 2138 if (res) { 2139 printk("SLAB: cache with size %d has lost its name\n", 2140 pc->buffer_size); 2141 continue; 2142 } 2143 2144 if (!strcmp(pc->name, name)) { 2145 printk("kmem_cache_create: duplicate cache %s\n", name); 2146 dump_stack(); 2147 goto oops; 2148 } 2149 } 2150 2151#if DEBUG 2152 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 2153 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { 2154 /* No constructor, but inital state check requested */ 2155 printk(KERN_ERR "%s: No con, but init state check " 2156 "requested - %s\n", __FUNCTION__, name); 2157 flags &= ~SLAB_DEBUG_INITIAL; 2158 } 2159#if FORCED_DEBUG 2160 /* 2161 * Enable redzoning and last user accounting, except for caches with 2162 * large objects, if the increased size would increase the object size 2163 * above the next power of two: caches with object sizes just above a 2164 * power of two have a significant amount of internal fragmentation. 
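 *
 * A worked example of the check below, assuming a 64-bit build where the
 * two redzone words plus the user-store word add 3 * 8 = 24 bytes:
 *
 *	size = 4200:  fls(4199) == 13, fls(4199 + 24) == 13
 *	              same power of two, so debugging stays enabled
 *	size = 8184:  fls(8183) == 13, fls(8183 + 24) == 14
 *	              the padding would spill into the next power of two,
 *	              so red zones and user-store are left off
 *
 * (Sizes below 4096 pass the check unconditionally.)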
2165 */ 2166 if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD)) 2167 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2168 if (!(flags & SLAB_DESTROY_BY_RCU)) 2169 flags |= SLAB_POISON; 2170#endif 2171 if (flags & SLAB_DESTROY_BY_RCU) 2172 BUG_ON(flags & SLAB_POISON); 2173#endif 2174 if (flags & SLAB_DESTROY_BY_RCU) 2175 BUG_ON(dtor); 2176 2177 /* 2178 * Always checks flags, a caller might be expecting debug support which 2179 * isn't available. 2180 */ 2181 BUG_ON(flags & ~CREATE_MASK); 2182 2183 /* 2184 * Check that size is in terms of words. This is needed to avoid 2185 * unaligned accesses for some archs when redzoning is used, and makes 2186 * sure any on-slab bufctl's are also correctly aligned. 2187 */ 2188 if (size & (BYTES_PER_WORD - 1)) { 2189 size += (BYTES_PER_WORD - 1); 2190 size &= ~(BYTES_PER_WORD - 1); 2191 } 2192 2193 /* calculate the final buffer alignment: */ 2194 2195 /* 1) arch recommendation: can be overridden for debug */ 2196 if (flags & SLAB_HWCACHE_ALIGN) { 2197 /* 2198 * Default alignment: as specified by the arch code. Except if 2199 * an object is really small, then squeeze multiple objects into 2200 * one cacheline. 2201 */ 2202 ralign = cache_line_size(); 2203 while (size <= ralign / 2) 2204 ralign /= 2; 2205 } else { 2206 ralign = BYTES_PER_WORD; 2207 } 2208 2209 /* 2210 * Redzoning and user store require word alignment. Note this will be 2211 * overridden by architecture or caller mandated alignment if either 2212 * is greater than BYTES_PER_WORD. 2213 */ 2214 if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) 2215 ralign = BYTES_PER_WORD; 2216 2217 /* 2) arch mandated alignment */ 2218 if (ralign < ARCH_SLAB_MINALIGN) { 2219 ralign = ARCH_SLAB_MINALIGN; 2220 } 2221 /* 3) caller mandated alignment */ 2222 if (ralign < align) { 2223 ralign = align; 2224 } 2225 /* disable debug if necessary */ 2226 if (ralign > BYTES_PER_WORD) 2227 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2228 /* 2229 * 4) Store it. 2230 */ 2231 align = ralign; 2232 2233 /* Get cache's description obj. */ 2234 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); 2235 if (!cachep) 2236 goto oops; 2237 2238#if DEBUG 2239 cachep->obj_size = size; 2240 2241 /* 2242 * Both debugging options require word-alignment which is calculated 2243 * into align above. 2244 */ 2245 if (flags & SLAB_RED_ZONE) { 2246 /* add space for red zone words */ 2247 cachep->obj_offset += BYTES_PER_WORD; 2248 size += 2 * BYTES_PER_WORD; 2249 } 2250 if (flags & SLAB_STORE_USER) { 2251 /* user store requires one word storage behind the end of 2252 * the real object. 2253 */ 2254 size += BYTES_PER_WORD; 2255 } 2256#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2257 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2258 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2259 cachep->obj_offset += PAGE_SIZE - size; 2260 size = PAGE_SIZE; 2261 } 2262#endif 2263#endif 2264 2265 /* 2266 * Determine if the slab management is 'on' or 'off' slab. 2267 * (bootstrapping cannot cope with offslab caches so don't do 2268 * it too early on.) 2269 */ 2270 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) 2271 /* 2272 * Size is large, assume best to place the slab management obj 2273 * off-slab (should allow better packing of objs). 
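 *
 * With 4K pages this covers objects of 512 bytes (PAGE_SIZE >> 3) and up,
 * once slab_early_init is over: the struct slab plus its bufctl array then
 * comes from a separate general cache, unless the leftover-space check
 * further down moves the management back on-slab.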
2274 */ 2275 flags |= CFLGS_OFF_SLAB; 2276 2277 size = ALIGN(size, align); 2278 2279 left_over = calculate_slab_order(cachep, size, align, flags); 2280 2281 if (!cachep->num) { 2282 printk("kmem_cache_create: couldn't create cache %s.\n", name); 2283 kmem_cache_free(&cache_cache, cachep); 2284 cachep = NULL; 2285 goto oops; 2286 } 2287 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2288 + sizeof(struct slab), align); 2289 2290 /* 2291 * If the slab has been placed off-slab, and we have enough space then 2292 * move it on-slab. This is at the expense of any extra colouring. 2293 */ 2294 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2295 flags &= ~CFLGS_OFF_SLAB; 2296 left_over -= slab_size; 2297 } 2298 2299 if (flags & CFLGS_OFF_SLAB) { 2300 /* really off slab. No need for manual alignment */ 2301 slab_size = 2302 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2303 } 2304 2305 cachep->colour_off = cache_line_size(); 2306 /* Offset must be a multiple of the alignment. */ 2307 if (cachep->colour_off < align) 2308 cachep->colour_off = align; 2309 cachep->colour = left_over / cachep->colour_off; 2310 cachep->slab_size = slab_size; 2311 cachep->flags = flags; 2312 cachep->gfpflags = 0; 2313 if (flags & SLAB_CACHE_DMA) 2314 cachep->gfpflags |= GFP_DMA; 2315 cachep->buffer_size = size; 2316 2317 if (flags & CFLGS_OFF_SLAB) { 2318 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2319 /* 2320 * This is a possibility for one of the malloc_sizes caches. 2321 * But since we go off slab only for object size greater than 2322 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2323 * this should not happen at all. 2324 * But leave a BUG_ON for some lucky dude. 2325 */ 2326 BUG_ON(!cachep->slabp_cache); 2327 } 2328 cachep->ctor = ctor; 2329 cachep->dtor = dtor; 2330 cachep->name = name; 2331 2332 if (setup_cpu_cache(cachep)) { 2333 __kmem_cache_destroy(cachep); 2334 cachep = NULL; 2335 goto oops; 2336 } 2337 2338 /* cache setup completed, link it into the list */ 2339 list_add(&cachep->next, &cache_chain); 2340oops: 2341 if (!cachep && (flags & SLAB_PANIC)) 2342 panic("kmem_cache_create(): failed to create slab `%s'\n", 2343 name); 2344 mutex_unlock(&cache_chain_mutex); 2345 return cachep; 2346} 2347EXPORT_SYMBOL(kmem_cache_create); 2348 2349#if DEBUG 2350static void check_irq_off(void) 2351{ 2352 BUG_ON(!irqs_disabled()); 2353} 2354 2355static void check_irq_on(void) 2356{ 2357 BUG_ON(irqs_disabled()); 2358} 2359 2360static void check_spinlock_acquired(struct kmem_cache *cachep) 2361{ 2362#ifdef CONFIG_SMP 2363 check_irq_off(); 2364 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2365#endif 2366} 2367 2368static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2369{ 2370#ifdef CONFIG_SMP 2371 check_irq_off(); 2372 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2373#endif 2374} 2375 2376#else 2377#define check_irq_off() do { } while(0) 2378#define check_irq_on() do { } while(0) 2379#define check_spinlock_acquired(x) do { } while(0) 2380#define check_spinlock_acquired_node(x, y) do { } while(0) 2381#endif 2382 2383static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2384 struct array_cache *ac, 2385 int force, int node); 2386 2387static void do_drain(void *arg) 2388{ 2389 struct kmem_cache *cachep = arg; 2390 struct array_cache *ac; 2391 int node = numa_node_id(); 2392 2393 check_irq_off(); 2394 ac = cpu_cache_get(cachep); 2395 spin_lock(&cachep->nodelists[node]->list_lock); 2396 
free_block(cachep, ac->entry, ac->avail, node); 2397 spin_unlock(&cachep->nodelists[node]->list_lock); 2398 ac->avail = 0; 2399} 2400 2401static void drain_cpu_caches(struct kmem_cache *cachep) 2402{ 2403 struct kmem_list3 *l3; 2404 int node; 2405 2406 on_each_cpu(do_drain, cachep, 1, 1); 2407 check_irq_on(); 2408 for_each_online_node(node) { 2409 l3 = cachep->nodelists[node]; 2410 if (l3 && l3->alien) 2411 drain_alien_cache(cachep, l3->alien); 2412 } 2413 2414 for_each_online_node(node) { 2415 l3 = cachep->nodelists[node]; 2416 if (l3) 2417 drain_array(cachep, l3, l3->shared, 1, node); 2418 } 2419} 2420 2421/* 2422 * Remove slabs from the list of free slabs. 2423 * Specify the number of slabs to drain in tofree. 2424 * 2425 * Returns the actual number of slabs released. 2426 */ 2427static int drain_freelist(struct kmem_cache *cache, 2428 struct kmem_list3 *l3, int tofree) 2429{ 2430 struct list_head *p; 2431 int nr_freed; 2432 struct slab *slabp; 2433 2434 nr_freed = 0; 2435 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { 2436 2437 spin_lock_irq(&l3->list_lock); 2438 p = l3->slabs_free.prev; 2439 if (p == &l3->slabs_free) { 2440 spin_unlock_irq(&l3->list_lock); 2441 goto out; 2442 } 2443 2444 slabp = list_entry(p, struct slab, list); 2445#if DEBUG 2446 BUG_ON(slabp->inuse); 2447#endif 2448 list_del(&slabp->list); 2449 /* 2450 * Safe to drop the lock. The slab is no longer linked 2451 * to the cache. 2452 */ 2453 l3->free_objects -= cache->num; 2454 spin_unlock_irq(&l3->list_lock); 2455 slab_destroy(cache, slabp); 2456 nr_freed++; 2457 } 2458out: 2459 return nr_freed; 2460} 2461 2462/* Called with cache_chain_mutex held to protect against cpu hotplug */ 2463static int __cache_shrink(struct kmem_cache *cachep) 2464{ 2465 int ret = 0, i = 0; 2466 struct kmem_list3 *l3; 2467 2468 drain_cpu_caches(cachep); 2469 2470 check_irq_on(); 2471 for_each_online_node(i) { 2472 l3 = cachep->nodelists[i]; 2473 if (!l3) 2474 continue; 2475 2476 drain_freelist(cachep, l3, l3->free_objects); 2477 2478 ret += !list_empty(&l3->slabs_full) || 2479 !list_empty(&l3->slabs_partial); 2480 } 2481 return (ret ? 1 : 0); 2482} 2483 2484/** 2485 * kmem_cache_shrink - Shrink a cache. 2486 * @cachep: The cache to shrink. 2487 * 2488 * Releases as many slabs as possible for a cache. 2489 * To help debugging, a zero exit status indicates all slabs were released. 2490 */ 2491int kmem_cache_shrink(struct kmem_cache *cachep) 2492{ 2493 int ret; 2494 BUG_ON(!cachep || in_interrupt()); 2495 2496 mutex_lock(&cache_chain_mutex); 2497 ret = __cache_shrink(cachep); 2498 mutex_unlock(&cache_chain_mutex); 2499 return ret; 2500} 2501EXPORT_SYMBOL(kmem_cache_shrink); 2502 2503/** 2504 * kmem_cache_destroy - delete a cache 2505 * @cachep: the cache to destroy 2506 * 2507 * Remove a struct kmem_cache object from the slab cache. 2508 * 2509 * It is expected this function will be called by a module when it is 2510 * unloaded. This will remove the cache completely, and avoid a duplicate 2511 * cache being allocated each time a module is loaded and unloaded, if the 2512 * module doesn't have persistent in-kernel storage across loads and unloads. 2513 * 2514 * The cache must be empty before calling this function. 2515 * 2516 * The caller must guarantee that noone will allocate memory from the cache 2517 * during the kmem_cache_destroy(). 2518 */ 2519void kmem_cache_destroy(struct kmem_cache *cachep) 2520{ 2521 BUG_ON(!cachep || in_interrupt()); 2522 2523 /* Find the cache in the chain of caches. 
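 * The entry is unlinked from the chain first; if the shrink below fails
 * because objects are still allocated, the cache is simply re-linked and
 * the destroy is abandoned with an error message.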
*/ 2524 mutex_lock(&cache_chain_mutex); 2525 /* 2526 * the chain is never empty, cache_cache is never destroyed 2527 */ 2528 list_del(&cachep->next); 2529 if (__cache_shrink(cachep)) { 2530 slab_error(cachep, "Can't free all objects"); 2531 list_add(&cachep->next, &cache_chain); 2532 mutex_unlock(&cache_chain_mutex); 2533 return; 2534 } 2535 2536 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2537 synchronize_rcu(); 2538 2539 __kmem_cache_destroy(cachep); 2540 mutex_unlock(&cache_chain_mutex); 2541} 2542EXPORT_SYMBOL(kmem_cache_destroy); 2543 2544/* 2545 * Get the memory for a slab management obj. 2546 * For a slab cache when the slab descriptor is off-slab, slab descriptors 2547 * always come from malloc_sizes caches. The slab descriptor cannot 2548 * come from the same cache which is getting created because, 2549 * when we are searching for an appropriate cache for these 2550 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2551 * If we are creating a malloc_sizes cache here it would not be visible to 2552 * kmem_find_general_cachep till the initialization is complete. 2553 * Hence we cannot have slabp_cache same as the original cache. 2554 */ 2555static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2556 int colour_off, gfp_t local_flags, 2557 int nodeid) 2558{ 2559 struct slab *slabp; 2560 2561 if (OFF_SLAB(cachep)) { 2562 /* Slab management obj is off-slab. */ 2563 slabp = kmem_cache_alloc_node(cachep->slabp_cache, 2564 local_flags & ~GFP_THISNODE, nodeid); 2565 if (!slabp) 2566 return NULL; 2567 } else { 2568 slabp = objp + colour_off; 2569 colour_off += cachep->slab_size; 2570 } 2571 slabp->inuse = 0; 2572 slabp->colouroff = colour_off; 2573 slabp->s_mem = objp + colour_off; 2574 slabp->nodeid = nodeid; 2575 return slabp; 2576} 2577 2578static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2579{ 2580 return (kmem_bufctl_t *) (slabp + 1); 2581} 2582 2583static void cache_init_objs(struct kmem_cache *cachep, 2584 struct slab *slabp, unsigned long ctor_flags) 2585{ 2586 int i; 2587 2588 for (i = 0; i < cachep->num; i++) { 2589 void *objp = index_to_obj(cachep, slabp, i); 2590#if DEBUG 2591 /* need to poison the objs? */ 2592 if (cachep->flags & SLAB_POISON) 2593 poison_obj(cachep, objp, POISON_FREE); 2594 if (cachep->flags & SLAB_STORE_USER) 2595 *dbg_userword(cachep, objp) = NULL; 2596 2597 if (cachep->flags & SLAB_RED_ZONE) { 2598 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2599 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2600 } 2601 /* 2602 * Constructors are not allowed to allocate memory from the same 2603 * cache which they are a constructor for. Otherwise, deadlock. 2604 * They must also be threaded. 
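 *
 * A constructor that obeys this could look like the hypothetical sketch
 * below ('struct foo' and foo_ctor are made-up names): it only initialises
 * fields and never calls back into the allocator.
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *	                     unsigned long flags)
 *	{
 *		struct foo *f = obj;
 *
 *		spin_lock_init(&f->lock);
 *		INIT_LIST_HEAD(&f->list);
 *		// calling kmem_cache_alloc(cachep, ...) here could deadlock
 *	}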
2605 */ 2606 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2607 cachep->ctor(objp + obj_offset(cachep), cachep, 2608 ctor_flags); 2609 2610 if (cachep->flags & SLAB_RED_ZONE) { 2611 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2612 slab_error(cachep, "constructor overwrote the" 2613 " end of an object"); 2614 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2615 slab_error(cachep, "constructor overwrote the" 2616 " start of an object"); 2617 } 2618 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2619 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2620 kernel_map_pages(virt_to_page(objp), 2621 cachep->buffer_size / PAGE_SIZE, 0); 2622#else 2623 if (cachep->ctor) 2624 cachep->ctor(objp, cachep, ctor_flags); 2625#endif 2626 slab_bufctl(slabp)[i] = i + 1; 2627 } 2628 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2629 slabp->free = 0; 2630} 2631 2632static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2633{ 2634 if (flags & GFP_DMA) 2635 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2636 else 2637 BUG_ON(cachep->gfpflags & GFP_DMA); 2638} 2639 2640static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2641 int nodeid) 2642{ 2643 void *objp = index_to_obj(cachep, slabp, slabp->free); 2644 kmem_bufctl_t next; 2645 2646 slabp->inuse++; 2647 next = slab_bufctl(slabp)[slabp->free]; 2648#if DEBUG 2649 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2650 WARN_ON(slabp->nodeid != nodeid); 2651#endif 2652 slabp->free = next; 2653 2654 return objp; 2655} 2656 2657static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2658 void *objp, int nodeid) 2659{ 2660 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2661 2662#if DEBUG 2663 /* Verify that the slab belongs to the intended node */ 2664 WARN_ON(slabp->nodeid != nodeid); 2665 2666 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2667 printk(KERN_ERR "slab: double free detected in cache " 2668 "'%s', objp %p\n", cachep->name, objp); 2669 BUG(); 2670 } 2671#endif 2672 slab_bufctl(slabp)[objnr] = slabp->free; 2673 slabp->free = objnr; 2674 slabp->inuse--; 2675} 2676 2677/* 2678 * Map pages beginning at addr to the given cache and slab. This is required 2679 * for the slab allocator to be able to lookup the cache and slab of a 2680 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2681 */ 2682static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2683 void *addr) 2684{ 2685 int nr_pages; 2686 struct page *page; 2687 2688 page = virt_to_page(addr); 2689 2690 nr_pages = 1; 2691 if (likely(!PageCompound(page))) 2692 nr_pages <<= cache->gfporder; 2693 2694 do { 2695 page_set_cache(page, cache); 2696 page_set_slab(page, slab); 2697 page++; 2698 } while (--nr_pages); 2699} 2700 2701/* 2702 * Grow (by 1) the number of slabs within a cache. This is called by 2703 * kmem_cache_alloc() when there are no active objs left in a cache. 2704 */ 2705static int cache_grow(struct kmem_cache *cachep, 2706 gfp_t flags, int nodeid, void *objp) 2707{ 2708 struct slab *slabp; 2709 size_t offset; 2710 gfp_t local_flags; 2711 unsigned long ctor_flags; 2712 struct kmem_list3 *l3; 2713 2714 /* 2715 * Be lazy and only check for valid flags here, keeping it out of the 2716 * critical path in kmem_cache_alloc(). 
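 *
 * A caller that passes an inconsistent GFP_DMA flag (for instance plain
 * GFP_KERNEL against a SLAB_CACHE_DMA cache) is therefore only caught by
 * kmem_flagcheck() further down, once the per-cpu array misses and the
 * cache really has to grow - or earlier when DEBUG checking is compiled in.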
2717 */ 2718 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); 2719 if (flags & __GFP_NO_GROW) 2720 return 0; 2721 2722 ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2723 local_flags = (flags & GFP_LEVEL_MASK); 2724 if (!(local_flags & __GFP_WAIT)) 2725 /* 2726 * Not allowed to sleep. Need to tell a constructor about 2727 * this - it might need to know... 2728 */ 2729 ctor_flags |= SLAB_CTOR_ATOMIC; 2730 2731 /* Take the l3 list lock to change the colour_next on this node */ 2732 check_irq_off(); 2733 l3 = cachep->nodelists[nodeid]; 2734 spin_lock(&l3->list_lock); 2735 2736 /* Get colour for the slab, and cal the next value. */ 2737 offset = l3->colour_next; 2738 l3->colour_next++; 2739 if (l3->colour_next >= cachep->colour) 2740 l3->colour_next = 0; 2741 spin_unlock(&l3->list_lock); 2742 2743 offset *= cachep->colour_off; 2744 2745 if (local_flags & __GFP_WAIT) 2746 local_irq_enable(); 2747 2748 /* 2749 * The test for missing atomic flag is performed here, rather than 2750 * the more obvious place, simply to reduce the critical path length 2751 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2752 * will eventually be caught here (where it matters). 2753 */ 2754 kmem_flagcheck(cachep, flags); 2755 2756 /* 2757 * Get mem for the objs. Attempt to allocate a physical page from 2758 * 'nodeid'. 2759 */ 2760 if (!objp) 2761 objp = kmem_getpages(cachep, flags, nodeid); 2762 if (!objp) 2763 goto failed; 2764 2765 /* Get slab management. */ 2766 slabp = alloc_slabmgmt(cachep, objp, offset, 2767 local_flags & ~GFP_THISNODE, nodeid); 2768 if (!slabp) 2769 goto opps1; 2770 2771 slabp->nodeid = nodeid; 2772 slab_map_pages(cachep, slabp, objp); 2773 2774 cache_init_objs(cachep, slabp, ctor_flags); 2775 2776 if (local_flags & __GFP_WAIT) 2777 local_irq_disable(); 2778 check_irq_off(); 2779 spin_lock(&l3->list_lock); 2780 2781 /* Make slab active. */ 2782 list_add_tail(&slabp->list, &(l3->slabs_free)); 2783 STATS_INC_GROWN(cachep); 2784 l3->free_objects += cachep->num; 2785 spin_unlock(&l3->list_lock); 2786 return 1; 2787opps1: 2788 kmem_freepages(cachep, objp); 2789failed: 2790 if (local_flags & __GFP_WAIT) 2791 local_irq_disable(); 2792 return 0; 2793} 2794 2795#if DEBUG 2796 2797/* 2798 * Perform extra freeing checks: 2799 * - detect bad pointers. 2800 * - POISON/RED_ZONE checking 2801 * - destructor calls, for caches with POISON+dtor 2802 */ 2803static void kfree_debugcheck(const void *objp) 2804{ 2805 struct page *page; 2806 2807 if (!virt_addr_valid(objp)) { 2808 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2809 (unsigned long)objp); 2810 BUG(); 2811 } 2812 page = virt_to_page(objp); 2813 if (!PageSlab(page)) { 2814 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", 2815 (unsigned long)objp); 2816 BUG(); 2817 } 2818} 2819 2820static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2821{ 2822 unsigned long redzone1, redzone2; 2823 2824 redzone1 = *dbg_redzone1(cache, obj); 2825 redzone2 = *dbg_redzone2(cache, obj); 2826 2827 /* 2828 * Redzone is ok. 
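 * (RED_ACTIVE in both guard words means the object is currently allocated
 * and neither end was overwritten; a RED_INACTIVE pair means it had already
 * been freed, i.e. a double free.)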
2829 */ 2830 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2831 return; 2832 2833 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2834 slab_error(cache, "double free detected"); 2835 else 2836 slab_error(cache, "memory outside object was overwritten"); 2837 2838 printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n", 2839 obj, redzone1, redzone2); 2840} 2841 2842static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2843 void *caller) 2844{ 2845 struct page *page; 2846 unsigned int objnr; 2847 struct slab *slabp; 2848 2849 objp -= obj_offset(cachep); 2850 kfree_debugcheck(objp); 2851 page = virt_to_page(objp); 2852 2853 slabp = page_get_slab(page); 2854 2855 if (cachep->flags & SLAB_RED_ZONE) { 2856 verify_redzone_free(cachep, objp); 2857 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2858 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2859 } 2860 if (cachep->flags & SLAB_STORE_USER) 2861 *dbg_userword(cachep, objp) = caller; 2862 2863 objnr = obj_to_index(cachep, slabp, objp); 2864 2865 BUG_ON(objnr >= cachep->num); 2866 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2867 2868 if (cachep->flags & SLAB_DEBUG_INITIAL) { 2869 /* 2870 * Need to call the slab's constructor so the caller can 2871 * perform a verify of its state (debugging). Called without 2872 * the cache-lock held. 2873 */ 2874 cachep->ctor(objp + obj_offset(cachep), 2875 cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); 2876 } 2877 if (cachep->flags & SLAB_POISON && cachep->dtor) { 2878 /* we want to cache poison the object, 2879 * call the destruction callback 2880 */ 2881 cachep->dtor(objp + obj_offset(cachep), cachep, 0); 2882 } 2883#ifdef CONFIG_DEBUG_SLAB_LEAK 2884 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2885#endif 2886 if (cachep->flags & SLAB_POISON) { 2887#ifdef CONFIG_DEBUG_PAGEALLOC 2888 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2889 store_stackinfo(cachep, objp, (unsigned long)caller); 2890 kernel_map_pages(virt_to_page(objp), 2891 cachep->buffer_size / PAGE_SIZE, 0); 2892 } else { 2893 poison_obj(cachep, objp, POISON_FREE); 2894 } 2895#else 2896 poison_obj(cachep, objp, POISON_FREE); 2897#endif 2898 } 2899 return objp; 2900} 2901 2902static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2903{ 2904 kmem_bufctl_t i; 2905 int entries = 0; 2906 2907 /* Check slab's freelist to see if this obj is there. */ 2908 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2909 entries++; 2910 if (entries > cachep->num || i >= cachep->num) 2911 goto bad; 2912 } 2913 if (entries != cachep->num - slabp->inuse) { 2914bad: 2915 printk(KERN_ERR "slab: Internal list corruption detected in " 2916 "cache '%s'(%d), slabp %p(%d). 
Hexdump:\n", 2917 cachep->name, cachep->num, slabp, slabp->inuse); 2918 for (i = 0; 2919 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2920 i++) { 2921 if (i % 16 == 0) 2922 printk("\n%03x:", i); 2923 printk(" %02x", ((unsigned char *)slabp)[i]); 2924 } 2925 printk("\n"); 2926 BUG(); 2927 } 2928} 2929#else 2930#define kfree_debugcheck(x) do { } while(0) 2931#define cache_free_debugcheck(x,objp,z) (objp) 2932#define check_slabp(x,y) do { } while(0) 2933#endif 2934 2935static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2936{ 2937 int batchcount; 2938 struct kmem_list3 *l3; 2939 struct array_cache *ac; 2940 int node; 2941 2942 node = numa_node_id(); 2943 2944 check_irq_off(); 2945 ac = cpu_cache_get(cachep); 2946retry: 2947 batchcount = ac->batchcount; 2948 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2949 /* 2950 * If there was little recent activity on this cache, then 2951 * perform only a partial refill. Otherwise we could generate 2952 * refill bouncing. 2953 */ 2954 batchcount = BATCHREFILL_LIMIT; 2955 } 2956 l3 = cachep->nodelists[node]; 2957 2958 BUG_ON(ac->avail > 0 || !l3); 2959 spin_lock(&l3->list_lock); 2960 2961 /* See if we can refill from the shared array */ 2962 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) 2963 goto alloc_done; 2964 2965 while (batchcount > 0) { 2966 struct list_head *entry; 2967 struct slab *slabp; 2968 /* Get slab alloc is to come from. */ 2969 entry = l3->slabs_partial.next; 2970 if (entry == &l3->slabs_partial) { 2971 l3->free_touched = 1; 2972 entry = l3->slabs_free.next; 2973 if (entry == &l3->slabs_free) 2974 goto must_grow; 2975 } 2976 2977 slabp = list_entry(entry, struct slab, list); 2978 check_slabp(cachep, slabp); 2979 check_spinlock_acquired(cachep); 2980 while (slabp->inuse < cachep->num && batchcount--) { 2981 STATS_INC_ALLOCED(cachep); 2982 STATS_INC_ACTIVE(cachep); 2983 STATS_SET_HIGH(cachep); 2984 2985 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, 2986 node); 2987 } 2988 check_slabp(cachep, slabp); 2989 2990 /* move slabp to correct slabp list: */ 2991 list_del(&slabp->list); 2992 if (slabp->free == BUFCTL_END) 2993 list_add(&slabp->list, &l3->slabs_full); 2994 else 2995 list_add(&slabp->list, &l3->slabs_partial); 2996 } 2997 2998must_grow: 2999 l3->free_objects -= ac->avail; 3000alloc_done: 3001 spin_unlock(&l3->list_lock); 3002 3003 if (unlikely(!ac->avail)) { 3004 int x; 3005 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); 3006 3007 /* cache_grow can reenable interrupts, then ac could change. */ 3008 ac = cpu_cache_get(cachep); 3009 if (!x && ac->avail == 0) /* no objects in sight? abort */ 3010 return NULL; 3011 3012 if (!ac->avail) /* objects refilled by interrupt? 
*/ 3013 goto retry; 3014 } 3015 ac->touched = 1; 3016 return ac->entry[--ac->avail]; 3017} 3018 3019static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3020 gfp_t flags) 3021{ 3022 might_sleep_if(flags & __GFP_WAIT); 3023#if DEBUG 3024 kmem_flagcheck(cachep, flags); 3025#endif 3026} 3027 3028#if DEBUG 3029static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3030 gfp_t flags, void *objp, void *caller) 3031{ 3032 if (!objp) 3033 return objp; 3034 if (cachep->flags & SLAB_POISON) { 3035#ifdef CONFIG_DEBUG_PAGEALLOC 3036 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 3037 kernel_map_pages(virt_to_page(objp), 3038 cachep->buffer_size / PAGE_SIZE, 1); 3039 else 3040 check_poison_obj(cachep, objp); 3041#else 3042 check_poison_obj(cachep, objp); 3043#endif 3044 poison_obj(cachep, objp, POISON_INUSE); 3045 } 3046 if (cachep->flags & SLAB_STORE_USER) 3047 *dbg_userword(cachep, objp) = caller; 3048 3049 if (cachep->flags & SLAB_RED_ZONE) { 3050 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3051 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3052 slab_error(cachep, "double free, or memory outside" 3053 " object was overwritten"); 3054 printk(KERN_ERR 3055 "%p: redzone 1:0x%lx, redzone 2:0x%lx\n", 3056 objp, *dbg_redzone1(cachep, objp), 3057 *dbg_redzone2(cachep, objp)); 3058 } 3059 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3060 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3061 } 3062#ifdef CONFIG_DEBUG_SLAB_LEAK 3063 { 3064 struct slab *slabp; 3065 unsigned objnr; 3066 3067 slabp = page_get_slab(virt_to_page(objp)); 3068 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 3069 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 3070 } 3071#endif 3072 objp += obj_offset(cachep); 3073 if (cachep->ctor && cachep->flags & SLAB_POISON) { 3074 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; 3075 3076 if (!(flags & __GFP_WAIT)) 3077 ctor_flags |= SLAB_CTOR_ATOMIC; 3078 3079 cachep->ctor(objp, cachep, ctor_flags); 3080 } 3081#if ARCH_SLAB_MINALIGN 3082 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3083 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3084 objp, ARCH_SLAB_MINALIGN); 3085 } 3086#endif 3087 return objp; 3088} 3089#else 3090#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3091#endif 3092 3093#ifdef CONFIG_FAILSLAB 3094 3095static struct failslab_attr { 3096 3097 struct fault_attr attr; 3098 3099 u32 ignore_gfp_wait; 3100#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3101 struct dentry *ignore_gfp_wait_file; 3102#endif 3103 3104} failslab = { 3105 .attr = FAULT_ATTR_INITIALIZER, 3106 .ignore_gfp_wait = 1, 3107}; 3108 3109static int __init setup_failslab(char *str) 3110{ 3111 return setup_fault_attr(&failslab.attr, str); 3112} 3113__setup("failslab=", setup_failslab); 3114 3115static int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3116{ 3117 if (cachep == &cache_cache) 3118 return 0; 3119 if (flags & __GFP_NOFAIL) 3120 return 0; 3121 if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT)) 3122 return 0; 3123 3124 return should_fail(&failslab.attr, obj_size(cachep)); 3125} 3126 3127#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3128 3129static int __init failslab_debugfs(void) 3130{ 3131 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 3132 struct dentry *dir; 3133 int err; 3134 3135 err = init_fault_attr_dentries(&failslab.attr, "failslab"); 3136 if (err) 3137 return err; 3138 dir = failslab.attr.dentries.dir; 3139 3140 failslab.ignore_gfp_wait_file = 3141 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3142 
&failslab.ignore_gfp_wait); 3143 3144 if (!failslab.ignore_gfp_wait_file) { 3145 err = -ENOMEM; 3146 debugfs_remove(failslab.ignore_gfp_wait_file); 3147 cleanup_fault_attr_dentries(&failslab.attr); 3148 } 3149 3150 return err; 3151} 3152 3153late_initcall(failslab_debugfs); 3154 3155#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3156 3157#else /* CONFIG_FAILSLAB */ 3158 3159static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3160{ 3161 return 0; 3162} 3163 3164#endif /* CONFIG_FAILSLAB */ 3165 3166static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3167{ 3168 void *objp; 3169 struct array_cache *ac; 3170 3171 check_irq_off(); 3172 3173 if (should_failslab(cachep, flags)) 3174 return NULL; 3175 3176 ac = cpu_cache_get(cachep); 3177 if (likely(ac->avail)) { 3178 STATS_INC_ALLOCHIT(cachep); 3179 ac->touched = 1; 3180 objp = ac->entry[--ac->avail]; 3181 } else { 3182 STATS_INC_ALLOCMISS(cachep); 3183 objp = cache_alloc_refill(cachep, flags); 3184 } 3185 return objp; 3186} 3187 3188static __always_inline void *__cache_alloc(struct kmem_cache *cachep, 3189 gfp_t flags, void *caller) 3190{ 3191 unsigned long save_flags; 3192 void *objp = NULL; 3193 3194 cache_alloc_debugcheck_before(cachep, flags); 3195 3196 local_irq_save(save_flags); 3197 3198 if (unlikely(NUMA_BUILD && 3199 current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) 3200 objp = alternate_node_alloc(cachep, flags); 3201 3202 if (!objp) 3203 objp = ____cache_alloc(cachep, flags); 3204 /* 3205 * We may just have run out of memory on the local node. 3206 * ____cache_alloc_node() knows how to locate memory on other nodes 3207 */ 3208 if (NUMA_BUILD && !objp) 3209 objp = ____cache_alloc_node(cachep, flags, numa_node_id()); 3210 local_irq_restore(save_flags); 3211 objp = cache_alloc_debugcheck_after(cachep, flags, objp, 3212 caller); 3213 prefetchw(objp); 3214 return objp; 3215} 3216 3217#ifdef CONFIG_NUMA 3218/* 3219 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 3220 * 3221 * If we are in_interrupt, then process context, including cpusets and 3222 * mempolicy, may not apply and should not be used for allocation policy. 3223 */ 3224static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3225{ 3226 int nid_alloc, nid_here; 3227 3228 if (in_interrupt() || (flags & __GFP_THISNODE)) 3229 return NULL; 3230 nid_alloc = nid_here = numa_node_id(); 3231 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3232 nid_alloc = cpuset_mem_spread_node(); 3233 else if (current->mempolicy) 3234 nid_alloc = slab_node(current->mempolicy); 3235 if (nid_alloc != nid_here) 3236 return ____cache_alloc_node(cachep, flags, nid_alloc); 3237 return NULL; 3238} 3239 3240/* 3241 * Fallback function if there was no memory available and no objects on a 3242 * certain node and fall back is permitted. First we scan all the 3243 * available nodelists for available objects. If that fails then we 3244 * perform an allocation without specifying a node. This allows the page 3245 * allocator to do its reclaim / fallback magic. We then insert the 3246 * slab into the proper nodelist and then allocate from it. 
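 *
 * In outline, the code below does:
 *	1) walk the allowed zonelist and try ____cache_alloc_node() with
 *	   GFP_THISNODE on every node that still has free objects;
 *	2) failing that, kmem_getpages() with no node restriction, letting
 *	   the page allocator reclaim or fall back as it sees fit;
 *	3) grow the nodelist of whichever node the new page landed on and
 *	   allocate (or retry) from there.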
3247 */ 3248void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3249{ 3250 struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy)) 3251 ->node_zonelists[gfp_zone(flags)]; 3252 struct zone **z; 3253 void *obj = NULL; 3254 int nid; 3255 gfp_t local_flags = (flags & GFP_LEVEL_MASK); 3256 3257retry: 3258 /* 3259 * Look through allowed nodes for objects available 3260 * from existing per node queues. 3261 */ 3262 for (z = zonelist->zones; *z && !obj; z++) { 3263 nid = zone_to_nid(*z); 3264 3265 if (cpuset_zone_allowed(*z, flags | __GFP_HARDWALL) && 3266 cache->nodelists[nid] && 3267 cache->nodelists[nid]->free_objects) 3268 obj = ____cache_alloc_node(cache, 3269 flags | GFP_THISNODE, nid); 3270 } 3271 3272 if (!obj) { 3273 /* 3274 * This allocation will be performed within the constraints 3275 * of the current cpuset / memory policy requirements. 3276 * We may trigger various forms of reclaim on the allowed 3277 * set and go into memory reserves if necessary. 3278 */ 3279 if (local_flags & __GFP_WAIT) 3280 local_irq_enable(); 3281 kmem_flagcheck(cache, flags); 3282 obj = kmem_getpages(cache, flags, -1); 3283 if (local_flags & __GFP_WAIT) 3284 local_irq_disable(); 3285 if (obj) { 3286 /* 3287 * Insert into the appropriate per node queues 3288 */ 3289 nid = page_to_nid(virt_to_page(obj)); 3290 if (cache_grow(cache, flags, nid, obj)) { 3291 obj = ____cache_alloc_node(cache, 3292 flags | GFP_THISNODE, nid); 3293 if (!obj) 3294 /* 3295 * Another processor may allocate the 3296 * objects in the slab since we are 3297 * not holding any locks. 3298 */ 3299 goto retry; 3300 } else { 3301 kmem_freepages(cache, obj); 3302 obj = NULL; 3303 } 3304 } 3305 } 3306 return obj; 3307} 3308 3309/* 3310 * A interface to enable slab creation on nodeid 3311 */ 3312static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3313 int nodeid) 3314{ 3315 struct list_head *entry; 3316 struct slab *slabp; 3317 struct kmem_list3 *l3; 3318 void *obj; 3319 int x; 3320 3321 l3 = cachep->nodelists[nodeid]; 3322 BUG_ON(!l3); 3323 3324retry: 3325 check_irq_off(); 3326 spin_lock(&l3->list_lock); 3327 entry = l3->slabs_partial.next; 3328 if (entry == &l3->slabs_partial) { 3329 l3->free_touched = 1; 3330 entry = l3->slabs_free.next; 3331 if (entry == &l3->slabs_free) 3332 goto must_grow; 3333 } 3334 3335 slabp = list_entry(entry, struct slab, list); 3336 check_spinlock_acquired_node(cachep, nodeid); 3337 check_slabp(cachep, slabp); 3338 3339 STATS_INC_NODEALLOCS(cachep); 3340 STATS_INC_ACTIVE(cachep); 3341 STATS_SET_HIGH(cachep); 3342 3343 BUG_ON(slabp->inuse == cachep->num); 3344 3345 obj = slab_get_obj(cachep, slabp, nodeid); 3346 check_slabp(cachep, slabp); 3347 l3->free_objects--; 3348 /* move slabp to correct slabp list: */ 3349 list_del(&slabp->list); 3350 3351 if (slabp->free == BUFCTL_END) 3352 list_add(&slabp->list, &l3->slabs_full); 3353 else 3354 list_add(&slabp->list, &l3->slabs_partial); 3355 3356 spin_unlock(&l3->list_lock); 3357 goto done; 3358 3359must_grow: 3360 spin_unlock(&l3->list_lock); 3361 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); 3362 if (x) 3363 goto retry; 3364 3365 if (!(flags & __GFP_THISNODE)) 3366 /* Unable to grow the cache. Fall back to other nodes. 
*/ 3367 return fallback_alloc(cachep, flags); 3368 3369 return NULL; 3370 3371done: 3372 return obj; 3373} 3374#endif 3375 3376/* 3377 * Caller needs to acquire correct kmem_list's list_lock 3378 */ 3379static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3380 int node) 3381{ 3382 int i; 3383 struct kmem_list3 *l3; 3384 3385 for (i = 0; i < nr_objects; i++) { 3386 void *objp = objpp[i]; 3387 struct slab *slabp; 3388 3389 slabp = virt_to_slab(objp); 3390 l3 = cachep->nodelists[node]; 3391 list_del(&slabp->list); 3392 check_spinlock_acquired_node(cachep, node); 3393 check_slabp(cachep, slabp); 3394 slab_put_obj(cachep, slabp, objp, node); 3395 STATS_DEC_ACTIVE(cachep); 3396 l3->free_objects++; 3397 check_slabp(cachep, slabp); 3398 3399 /* fixup slab chains */ 3400 if (slabp->inuse == 0) { 3401 if (l3->free_objects > l3->free_limit) { 3402 l3->free_objects -= cachep->num; 3403 /* No need to drop any previously held 3404 * lock here, even if we have a off-slab slab 3405 * descriptor it is guaranteed to come from 3406 * a different cache, refer to comments before 3407 * alloc_slabmgmt. 3408 */ 3409 slab_destroy(cachep, slabp); 3410 } else { 3411 list_add(&slabp->list, &l3->slabs_free); 3412 } 3413 } else { 3414 /* Unconditionally move a slab to the end of the 3415 * partial list on free - maximum time for the 3416 * other objects to be freed, too. 3417 */ 3418 list_add_tail(&slabp->list, &l3->slabs_partial); 3419 } 3420 } 3421} 3422 3423static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3424{ 3425 int batchcount; 3426 struct kmem_list3 *l3; 3427 int node = numa_node_id(); 3428 3429 batchcount = ac->batchcount; 3430#if DEBUG 3431 BUG_ON(!batchcount || batchcount > ac->avail); 3432#endif 3433 check_irq_off(); 3434 l3 = cachep->nodelists[node]; 3435 spin_lock(&l3->list_lock); 3436 if (l3->shared) { 3437 struct array_cache *shared_array = l3->shared; 3438 int max = shared_array->limit - shared_array->avail; 3439 if (max) { 3440 if (batchcount > max) 3441 batchcount = max; 3442 memcpy(&(shared_array->entry[shared_array->avail]), 3443 ac->entry, sizeof(void *) * batchcount); 3444 shared_array->avail += batchcount; 3445 goto free_done; 3446 } 3447 } 3448 3449 free_block(cachep, ac->entry, batchcount, node); 3450free_done: 3451#if STATS 3452 { 3453 int i = 0; 3454 struct list_head *p; 3455 3456 p = l3->slabs_free.next; 3457 while (p != &(l3->slabs_free)) { 3458 struct slab *slabp; 3459 3460 slabp = list_entry(p, struct slab, list); 3461 BUG_ON(slabp->inuse); 3462 3463 i++; 3464 p = p->next; 3465 } 3466 STATS_SET_FREEABLE(cachep, i); 3467 } 3468#endif 3469 spin_unlock(&l3->list_lock); 3470 ac->avail -= batchcount; 3471 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3472} 3473 3474/* 3475 * Release an obj back to its cache. If the obj has a constructed state, it must 3476 * be in this state _before_ it is released. Called with disabled ints. 
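 *
 * For example, in a hypothetical cache whose constructor does
 * INIT_LIST_HEAD(&obj->list), the list_head must be empty and
 * re-initialised again by the time the object is freed.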
3477 */ 3478static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3479{ 3480 struct array_cache *ac = cpu_cache_get(cachep); 3481 3482 check_irq_off(); 3483 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3484 3485 if (cache_free_alien(cachep, objp)) 3486 return; 3487 3488 if (likely(ac->avail < ac->limit)) { 3489 STATS_INC_FREEHIT(cachep); 3490 ac->entry[ac->avail++] = objp; 3491 return; 3492 } else { 3493 STATS_INC_FREEMISS(cachep); 3494 cache_flusharray(cachep, ac); 3495 ac->entry[ac->avail++] = objp; 3496 } 3497} 3498 3499/** 3500 * kmem_cache_alloc - Allocate an object 3501 * @cachep: The cache to allocate from. 3502 * @flags: See kmalloc(). 3503 * 3504 * Allocate an object from this cache. The flags are only relevant 3505 * if the cache has no available objects. 3506 */ 3507void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3508{ 3509 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3510} 3511EXPORT_SYMBOL(kmem_cache_alloc); 3512 3513/** 3514 * kmem_cache_zalloc - Allocate an object. The memory is set to zero. 3515 * @cache: The cache to allocate from. 3516 * @flags: See kmalloc(). 3517 * 3518 * Allocate an object from this cache and set the allocated memory to zero. 3519 * The flags are only relevant if the cache has no available objects. 3520 */ 3521void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags) 3522{ 3523 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0)); 3524 if (ret) 3525 memset(ret, 0, obj_size(cache)); 3526 return ret; 3527} 3528EXPORT_SYMBOL(kmem_cache_zalloc); 3529 3530/** 3531 * kmem_ptr_validate - check if an untrusted pointer might 3532 * be a slab entry. 3533 * @cachep: the cache we're checking against 3534 * @ptr: pointer to validate 3535 * 3536 * This verifies that the untrusted pointer looks sane: 3537 * it is _not_ a guarantee that the pointer is actually 3538 * part of the slab cache in question, but it at least 3539 * validates that the pointer can be dereferenced and 3540 * looks half-way sane. 3541 * 3542 * Currently only used for dentry validation. 3543 */ 3544int fastcall kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) 3545{ 3546 unsigned long addr = (unsigned long)ptr; 3547 unsigned long min_addr = PAGE_OFFSET; 3548 unsigned long align_mask = BYTES_PER_WORD - 1; 3549 unsigned long size = cachep->buffer_size; 3550 struct page *page; 3551 3552 if (unlikely(addr < min_addr)) 3553 goto out; 3554 if (unlikely(addr > (unsigned long)high_memory - size)) 3555 goto out; 3556 if (unlikely(addr & align_mask)) 3557 goto out; 3558 if (unlikely(!kern_addr_valid(addr))) 3559 goto out; 3560 if (unlikely(!kern_addr_valid(addr + size - 1))) 3561 goto out; 3562 page = virt_to_page(ptr); 3563 if (unlikely(!PageSlab(page))) 3564 goto out; 3565 if (unlikely(page_get_cache(page) != cachep)) 3566 goto out; 3567 return 1; 3568out: 3569 return 0; 3570} 3571 3572#ifdef CONFIG_NUMA 3573/** 3574 * kmem_cache_alloc_node - Allocate an object on the specified node 3575 * @cachep: The cache to allocate from. 3576 * @flags: See kmalloc(). 3577 * @nodeid: node number of the target node. 3578 * 3579 * Identical to kmem_cache_alloc but it will allocate memory on the given 3580 * node, which can improve the performance for cpu bound structures. 3581 * 3582 * Fallback to other node is possible if __GFP_THISNODE is not set. 
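 *
 * Illustrative call (the cache, struct and cpu variable are made up):
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL,
 *	                                      cpu_to_node(cpu));
 *	if (!f)
 *		return -ENOMEM;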
3583 */ 3584static __always_inline void * 3585__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3586 int nodeid, void *caller) 3587{ 3588 unsigned long save_flags; 3589 void *ptr = NULL; 3590 3591 cache_alloc_debugcheck_before(cachep, flags); 3592 local_irq_save(save_flags); 3593 3594 if (unlikely(nodeid == -1)) 3595 nodeid = numa_node_id(); 3596 3597 if (likely(cachep->nodelists[nodeid])) { 3598 if (nodeid == numa_node_id()) { 3599 /* 3600 * Use the locally cached objects if possible. 3601 * However ____cache_alloc does not allow fallback 3602 * to other nodes. It may fail while we still have 3603 * objects on other nodes available. 3604 */ 3605 ptr = ____cache_alloc(cachep, flags); 3606 } 3607 if (!ptr) { 3608 /* ___cache_alloc_node can fall back to other nodes */ 3609 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3610 } 3611 } else { 3612 /* Node not bootstrapped yet */ 3613 if (!(flags & __GFP_THISNODE)) 3614 ptr = fallback_alloc(cachep, flags); 3615 } 3616 3617 local_irq_restore(save_flags); 3618 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3619 3620 return ptr; 3621} 3622 3623void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3624{ 3625 return __cache_alloc_node(cachep, flags, nodeid, 3626 __builtin_return_address(0)); 3627} 3628EXPORT_SYMBOL(kmem_cache_alloc_node); 3629 3630static __always_inline void * 3631__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3632{ 3633 struct kmem_cache *cachep; 3634 3635 cachep = kmem_find_general_cachep(size, flags); 3636 if (unlikely(cachep == NULL)) 3637 return NULL; 3638 return kmem_cache_alloc_node(cachep, flags, node); 3639} 3640 3641#ifdef CONFIG_DEBUG_SLAB 3642void *__kmalloc_node(size_t size, gfp_t flags, int node) 3643{ 3644 return __do_kmalloc_node(size, flags, node, 3645 __builtin_return_address(0)); 3646} 3647EXPORT_SYMBOL(__kmalloc_node); 3648 3649void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3650 int node, void *caller) 3651{ 3652 return __do_kmalloc_node(size, flags, node, caller); 3653} 3654EXPORT_SYMBOL(__kmalloc_node_track_caller); 3655#else 3656void *__kmalloc_node(size_t size, gfp_t flags, int node) 3657{ 3658 return __do_kmalloc_node(size, flags, node, NULL); 3659} 3660EXPORT_SYMBOL(__kmalloc_node); 3661#endif /* CONFIG_DEBUG_SLAB */ 3662#endif /* CONFIG_NUMA */ 3663 3664/** 3665 * __do_kmalloc - allocate memory 3666 * @size: how many bytes of memory are required. 3667 * @flags: the type of memory to allocate (see kmalloc). 3668 * @caller: function caller for debug tracking of the caller 3669 */ 3670static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3671 void *caller) 3672{ 3673 struct kmem_cache *cachep; 3674 3675 /* If you want to save a few bytes .text space: replace 3676 * __ with kmem_. 3677 * Then kmalloc uses the uninlined functions instead of the inline 3678 * functions. 
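 *
 * The size is simply mapped to one of the fixed general caches below; on
 * common configurations a kmalloc(100, GFP_KERNEL), for example, is served
 * from the size-128 cache.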
3679 */ 3680 cachep = __find_general_cachep(size, flags); 3681 if (unlikely(cachep == NULL)) 3682 return NULL; 3683 return __cache_alloc(cachep, flags, caller); 3684} 3685 3686 3687#ifdef CONFIG_DEBUG_SLAB 3688void *__kmalloc(size_t size, gfp_t flags) 3689{ 3690 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3691} 3692EXPORT_SYMBOL(__kmalloc); 3693 3694void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3695{ 3696 return __do_kmalloc(size, flags, caller); 3697} 3698EXPORT_SYMBOL(__kmalloc_track_caller); 3699 3700#else 3701void *__kmalloc(size_t size, gfp_t flags) 3702{ 3703 return __do_kmalloc(size, flags, NULL); 3704} 3705EXPORT_SYMBOL(__kmalloc); 3706#endif 3707 3708/** 3709 * kmem_cache_free - Deallocate an object 3710 * @cachep: The cache the allocation was from. 3711 * @objp: The previously allocated object. 3712 * 3713 * Free an object which was previously allocated from this 3714 * cache. 3715 */ 3716void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3717{ 3718 unsigned long flags; 3719 3720 BUG_ON(virt_to_cache(objp) != cachep); 3721 3722 local_irq_save(flags); 3723 __cache_free(cachep, objp); 3724 local_irq_restore(flags); 3725} 3726EXPORT_SYMBOL(kmem_cache_free); 3727 3728/** 3729 * kfree - free previously allocated memory 3730 * @objp: pointer returned by kmalloc. 3731 * 3732 * If @objp is NULL, no operation is performed. 3733 * 3734 * Don't free memory not originally allocated by kmalloc() 3735 * or you will run into trouble. 3736 */ 3737void kfree(const void *objp) 3738{ 3739 struct kmem_cache *c; 3740 unsigned long flags; 3741 3742 if (unlikely(!objp)) 3743 return; 3744 local_irq_save(flags); 3745 kfree_debugcheck(objp); 3746 c = virt_to_cache(objp); 3747 debug_check_no_locks_freed(objp, obj_size(c)); 3748 __cache_free(c, (void *)objp); 3749 local_irq_restore(flags); 3750} 3751EXPORT_SYMBOL(kfree); 3752 3753unsigned int kmem_cache_size(struct kmem_cache *cachep) 3754{ 3755 return obj_size(cachep); 3756} 3757EXPORT_SYMBOL(kmem_cache_size); 3758 3759const char *kmem_cache_name(struct kmem_cache *cachep) 3760{ 3761 return cachep->name; 3762} 3763EXPORT_SYMBOL_GPL(kmem_cache_name); 3764 3765/* 3766 * This initializes kmem_list3 or resizes varioius caches for all nodes. 
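 *
 * Called from do_tune_cpucache() below. The free_limit it sets per node is
 * (1 + nr_cpus_node(node)) * batchcount + num; for instance a node with
 * 2 cpus, a batchcount of 16 and 30 objects per slab keeps roughly
 * (1 + 2) * 16 + 30 = 78 free objects around before empty slabs start
 * being given back.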
3767 */ 3768static int alloc_kmemlist(struct kmem_cache *cachep) 3769{ 3770 int node; 3771 struct kmem_list3 *l3; 3772 struct array_cache *new_shared; 3773 struct array_cache **new_alien = NULL; 3774 3775 for_each_online_node(node) { 3776 3777 if (use_alien_caches) { 3778 new_alien = alloc_alien_cache(node, cachep->limit); 3779 if (!new_alien) 3780 goto fail; 3781 } 3782 3783 new_shared = alloc_arraycache(node, 3784 cachep->shared*cachep->batchcount, 3785 0xbaadf00d); 3786 if (!new_shared) { 3787 free_alien_cache(new_alien); 3788 goto fail; 3789 } 3790 3791 l3 = cachep->nodelists[node]; 3792 if (l3) { 3793 struct array_cache *shared = l3->shared; 3794 3795 spin_lock_irq(&l3->list_lock); 3796 3797 if (shared) 3798 free_block(cachep, shared->entry, 3799 shared->avail, node); 3800 3801 l3->shared = new_shared; 3802 if (!l3->alien) { 3803 l3->alien = new_alien; 3804 new_alien = NULL; 3805 } 3806 l3->free_limit = (1 + nr_cpus_node(node)) * 3807 cachep->batchcount + cachep->num; 3808 spin_unlock_irq(&l3->list_lock); 3809 kfree(shared); 3810 free_alien_cache(new_alien); 3811 continue; 3812 } 3813 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3814 if (!l3) { 3815 free_alien_cache(new_alien); 3816 kfree(new_shared); 3817 goto fail; 3818 } 3819 3820 kmem_list3_init(l3); 3821 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3822 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3823 l3->shared = new_shared; 3824 l3->alien = new_alien; 3825 l3->free_limit = (1 + nr_cpus_node(node)) * 3826 cachep->batchcount + cachep->num; 3827 cachep->nodelists[node] = l3; 3828 } 3829 return 0; 3830 3831fail: 3832 if (!cachep->next.next) { 3833 /* Cache is not active yet. Roll back what we did */ 3834 node--; 3835 while (node >= 0) { 3836 if (cachep->nodelists[node]) { 3837 l3 = cachep->nodelists[node]; 3838 3839 kfree(l3->shared); 3840 free_alien_cache(l3->alien); 3841 kfree(l3); 3842 cachep->nodelists[node] = NULL; 3843 } 3844 node--; 3845 } 3846 } 3847 return -ENOMEM; 3848} 3849 3850struct ccupdate_struct { 3851 struct kmem_cache *cachep; 3852 struct array_cache *new[NR_CPUS]; 3853}; 3854 3855static void do_ccupdate_local(void *info) 3856{ 3857 struct ccupdate_struct *new = info; 3858 struct array_cache *old; 3859 3860 check_irq_off(); 3861 old = cpu_cache_get(new->cachep); 3862 3863 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3864 new->new[smp_processor_id()] = old; 3865} 3866 3867/* Always called with the cache_chain_mutex held */ 3868static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3869 int batchcount, int shared) 3870{ 3871 struct ccupdate_struct *new; 3872 int i; 3873 3874 new = kzalloc(sizeof(*new), GFP_KERNEL); 3875 if (!new) 3876 return -ENOMEM; 3877 3878 for_each_online_cpu(i) { 3879 new->new[i] = alloc_arraycache(cpu_to_node(i), limit, 3880 batchcount); 3881 if (!new->new[i]) { 3882 for (i--; i >= 0; i--) 3883 kfree(new->new[i]); 3884 kfree(new); 3885 return -ENOMEM; 3886 } 3887 } 3888 new->cachep = cachep; 3889 3890 on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); 3891 3892 check_irq_on(); 3893 cachep->batchcount = batchcount; 3894 cachep->limit = limit; 3895 cachep->shared = shared; 3896 3897 for_each_online_cpu(i) { 3898 struct array_cache *ccold = new->new[i]; 3899 if (!ccold) 3900 continue; 3901 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3902 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3903 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3904 kfree(ccold); 3905 } 3906 kfree(new); 3907 return 
alloc_kmemlist(cachep); 3908} 3909 3910/* Called with cache_chain_mutex held always */ 3911static int enable_cpucache(struct kmem_cache *cachep) 3912{ 3913 int err; 3914 int limit, shared; 3915 3916 /* 3917 * The head array serves three purposes: 3918 * - create a LIFO ordering, i.e. return objects that are cache-warm 3919 * - reduce the number of spinlock operations. 3920 * - reduce the number of linked list operations on the slab and 3921 * bufctl chains: array operations are cheaper. 3922 * The numbers are guessed, we should auto-tune as described by 3923 * Bonwick. 3924 */ 3925 if (cachep->buffer_size > 131072) 3926 limit = 1; 3927 else if (cachep->buffer_size > PAGE_SIZE) 3928 limit = 8; 3929 else if (cachep->buffer_size > 1024) 3930 limit = 24; 3931 else if (cachep->buffer_size > 256) 3932 limit = 54; 3933 else 3934 limit = 120; 3935 3936 /* 3937 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3938 * allocation behaviour: Most allocs on one cpu, most free operations 3939 * on another cpu. For these cases, an efficient object passing between 3940 * cpus is necessary. This is provided by a shared array. The array 3941 * replaces Bonwick's magazine layer. 3942 * On uniprocessor, it's functionally equivalent (but less efficient) 3943 * to a larger limit. Thus disabled by default. 3944 */ 3945 shared = 0; 3946#ifdef CONFIG_SMP 3947 if (cachep->buffer_size <= PAGE_SIZE) 3948 shared = 8; 3949#endif 3950 3951#if DEBUG 3952 /* 3953 * With debugging enabled, large batchcount lead to excessively long 3954 * periods with disabled local interrupts. Limit the batchcount 3955 */ 3956 if (limit > 32) 3957 limit = 32; 3958#endif 3959 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); 3960 if (err) 3961 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3962 cachep->name, -err); 3963 return err; 3964} 3965 3966/* 3967 * Drain an array if it contains any elements taking the l3 lock only if 3968 * necessary. Note that the l3 listlock also protects the array_cache 3969 * if drain_array() is used on the shared array. 3970 */ 3971void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 3972 struct array_cache *ac, int force, int node) 3973{ 3974 int tofree; 3975 3976 if (!ac || !ac->avail) 3977 return; 3978 if (ac->touched && !force) { 3979 ac->touched = 0; 3980 } else { 3981 spin_lock_irq(&l3->list_lock); 3982 if (ac->avail) { 3983 tofree = force ? ac->avail : (ac->limit + 4) / 5; 3984 if (tofree > ac->avail) 3985 tofree = (ac->avail + 1) / 2; 3986 free_block(cachep, ac->entry, tofree, node); 3987 ac->avail -= tofree; 3988 memmove(ac->entry, &(ac->entry[tofree]), 3989 sizeof(void *) * ac->avail); 3990 } 3991 spin_unlock_irq(&l3->list_lock); 3992 } 3993} 3994 3995/** 3996 * cache_reap - Reclaim memory from caches. 3997 * @unused: unused parameter 3998 * 3999 * Called from workqueue/eventd every few seconds. 4000 * Purpose: 4001 * - clear the per-cpu caches for this CPU. 4002 * - return freeable pages to the main free memory pool. 4003 * 4004 * If we cannot acquire the cache chain mutex then just give up - we'll try 4005 * again on the next iteration. 4006 */ 4007static void cache_reap(struct work_struct *unused) 4008{ 4009 struct kmem_cache *searchp; 4010 struct kmem_list3 *l3; 4011 int node = numa_node_id(); 4012 4013 if (!mutex_trylock(&cache_chain_mutex)) { 4014 /* Give up. Setup the next iteration. 
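 * (The per-cpu reap work is simply re-armed REAPTIMEOUT_CPUC jiffies from
 * now, just like on the normal exit path at the end of this function.)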
*/ 4015 schedule_delayed_work(&__get_cpu_var(reap_work), 4016 round_jiffies_relative(REAPTIMEOUT_CPUC)); 4017 return; 4018 } 4019 4020 list_for_each_entry(searchp, &cache_chain, next) { 4021 check_irq_on(); 4022 4023 /* 4024 * We only take the l3 lock if absolutely necessary and we 4025 * have established with reasonable certainty that 4026 * we can do some work if the lock was obtained. 4027 */ 4028 l3 = searchp->nodelists[node]; 4029 4030 reap_alien(searchp, l3); 4031 4032 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); 4033 4034 /* 4035 * These are racy checks but it does not matter 4036 * if we skip one check or scan twice. 4037 */ 4038 if (time_after(l3->next_reap, jiffies)) 4039 goto next; 4040 4041 l3->next_reap = jiffies + REAPTIMEOUT_LIST3; 4042 4043 drain_array(searchp, l3, l3->shared, 0, node); 4044 4045 if (l3->free_touched) 4046 l3->free_touched = 0; 4047 else { 4048 int freed; 4049 4050 freed = drain_freelist(searchp, l3, (l3->free_limit + 4051 5 * searchp->num - 1) / (5 * searchp->num)); 4052 STATS_ADD_REAPED(searchp, freed); 4053 } 4054next: 4055 cond_resched(); 4056 } 4057 check_irq_on(); 4058 mutex_unlock(&cache_chain_mutex); 4059 next_reap_node(); 4060 refresh_cpu_vm_stats(smp_processor_id()); 4061 /* Set up the next iteration */ 4062 schedule_delayed_work(&__get_cpu_var(reap_work), 4063 round_jiffies_relative(REAPTIMEOUT_CPUC)); 4064} 4065 4066#ifdef CONFIG_PROC_FS 4067 4068static void print_slabinfo_header(struct seq_file *m) 4069{ 4070 /* 4071 * Output format version, so at least we can change it 4072 * without _too_ many complaints. 4073 */ 4074#if STATS 4075 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); 4076#else 4077 seq_puts(m, "slabinfo - version: 2.1\n"); 4078#endif 4079 seq_puts(m, "# name <active_objs> <num_objs> <objsize> " 4080 "<objperslab> <pagesperslab>"); 4081 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 4082 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 4083#if STATS 4084 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " 4085 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); 4086 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); 4087#endif 4088 seq_putc(m, '\n'); 4089} 4090 4091static void *s_start(struct seq_file *m, loff_t *pos) 4092{ 4093 loff_t n = *pos; 4094 struct list_head *p; 4095 4096 mutex_lock(&cache_chain_mutex); 4097 if (!n) 4098 print_slabinfo_header(m); 4099 p = cache_chain.next; 4100 while (n--) { 4101 p = p->next; 4102 if (p == &cache_chain) 4103 return NULL; 4104 } 4105 return list_entry(p, struct kmem_cache, next); 4106} 4107 4108static void *s_next(struct seq_file *m, void *p, loff_t *pos) 4109{ 4110 struct kmem_cache *cachep = p; 4111 ++*pos; 4112 return cachep->next.next == &cache_chain ? 
4113 NULL : list_entry(cachep->next.next, struct kmem_cache, next); 4114} 4115 4116static void s_stop(struct seq_file *m, void *p) 4117{ 4118 mutex_unlock(&cache_chain_mutex); 4119} 4120 4121static int s_show(struct seq_file *m, void *p) 4122{ 4123 struct kmem_cache *cachep = p; 4124 struct slab *slabp; 4125 unsigned long active_objs; 4126 unsigned long num_objs; 4127 unsigned long active_slabs = 0; 4128 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 4129 const char *name; 4130 char *error = NULL; 4131 int node; 4132 struct kmem_list3 *l3; 4133 4134 active_objs = 0; 4135 num_slabs = 0; 4136 for_each_online_node(node) { 4137 l3 = cachep->nodelists[node]; 4138 if (!l3) 4139 continue; 4140 4141 check_irq_on(); 4142 spin_lock_irq(&l3->list_lock); 4143 4144 list_for_each_entry(slabp, &l3->slabs_full, list) { 4145 if (slabp->inuse != cachep->num && !error) 4146 error = "slabs_full accounting error"; 4147 active_objs += cachep->num; 4148 active_slabs++; 4149 } 4150 list_for_each_entry(slabp, &l3->slabs_partial, list) { 4151 if (slabp->inuse == cachep->num && !error) 4152 error = "slabs_partial inuse accounting error"; 4153 if (!slabp->inuse && !error) 4154 error = "slabs_partial/inuse accounting error"; 4155 active_objs += slabp->inuse; 4156 active_slabs++; 4157 } 4158 list_for_each_entry(slabp, &l3->slabs_free, list) { 4159 if (slabp->inuse && !error) 4160 error = "slabs_free/inuse accounting error"; 4161 num_slabs++; 4162 } 4163 free_objects += l3->free_objects; 4164 if (l3->shared) 4165 shared_avail += l3->shared->avail; 4166 4167 spin_unlock_irq(&l3->list_lock); 4168 } 4169 num_slabs += active_slabs; 4170 num_objs = num_slabs * cachep->num; 4171 if (num_objs - active_objs != free_objects && !error) 4172 error = "free_objects accounting error"; 4173 4174 name = cachep->name; 4175 if (error) 4176 printk(KERN_ERR "slab: cache %s error: %s\n", name, error); 4177 4178 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", 4179 name, active_objs, num_objs, cachep->buffer_size, 4180 cachep->num, (1 << cachep->gfporder)); 4181 seq_printf(m, " : tunables %4u %4u %4u", 4182 cachep->limit, cachep->batchcount, cachep->shared); 4183 seq_printf(m, " : slabdata %6lu %6lu %6lu", 4184 active_slabs, num_slabs, shared_avail); 4185#if STATS 4186 { /* list3 stats */ 4187 unsigned long high = cachep->high_mark; 4188 unsigned long allocs = cachep->num_allocations; 4189 unsigned long grown = cachep->grown; 4190 unsigned long reaped = cachep->reaped; 4191 unsigned long errors = cachep->errors; 4192 unsigned long max_freeable = cachep->max_freeable; 4193 unsigned long node_allocs = cachep->node_allocs; 4194 unsigned long node_frees = cachep->node_frees; 4195 unsigned long overflows = cachep->node_overflow; 4196 4197 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ 4198 %4lu %4lu %4lu %4lu %4lu", allocs, high, grown, 4199 reaped, errors, max_freeable, node_allocs, 4200 node_frees, overflows); 4201 } 4202 /* cpu stats */ 4203 { 4204 unsigned long allochit = atomic_read(&cachep->allochit); 4205 unsigned long allocmiss = atomic_read(&cachep->allocmiss); 4206 unsigned long freehit = atomic_read(&cachep->freehit); 4207 unsigned long freemiss = atomic_read(&cachep->freemiss); 4208 4209 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", 4210 allochit, allocmiss, freehit, freemiss); 4211 } 4212#endif 4213 seq_putc(m, '\n'); 4214 return 0; 4215} 4216 4217/* 4218 * slabinfo_op - iterator that generates /proc/slabinfo 4219 * 4220 * Output layout: 4221 * cache-name 4222 * num-active-objs 4223 * total-objs 4224 * object size 
4225 * num-active-slabs 4226 * total-slabs 4227 * num-pages-per-slab 4228 * + further values on SMP and with statistics enabled 4229 */ 4230 4231const struct seq_operations slabinfo_op = { 4232 .start = s_start, 4233 .next = s_next, 4234 .stop = s_stop, 4235 .show = s_show, 4236}; 4237 4238#define MAX_SLABINFO_WRITE 128 4239/** 4240 * slabinfo_write - Tuning for the slab allocator 4241 * @file: unused 4242 * @buffer: user buffer 4243 * @count: data length 4244 * @ppos: unused 4245 */ 4246ssize_t slabinfo_write(struct file *file, const char __user * buffer, 4247 size_t count, loff_t *ppos) 4248{ 4249 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; 4250 int limit, batchcount, shared, res; 4251 struct kmem_cache *cachep; 4252 4253 if (count > MAX_SLABINFO_WRITE) 4254 return -EINVAL; 4255 if (copy_from_user(&kbuf, buffer, count)) 4256 return -EFAULT; 4257 kbuf[MAX_SLABINFO_WRITE] = '\0'; 4258 4259 tmp = strchr(kbuf, ' '); 4260 if (!tmp) 4261 return -EINVAL; 4262 *tmp = '\0'; 4263 tmp++; 4264 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) 4265 return -EINVAL; 4266 4267 /* Find the cache in the chain of caches. */ 4268 mutex_lock(&cache_chain_mutex); 4269 res = -EINVAL; 4270 list_for_each_entry(cachep, &cache_chain, next) { 4271 if (!strcmp(cachep->name, kbuf)) { 4272 if (limit < 1 || batchcount < 1 || 4273 batchcount > limit || shared < 0) { 4274 res = 0; 4275 } else { 4276 res = do_tune_cpucache(cachep, limit, 4277 batchcount, shared); 4278 } 4279 break; 4280 } 4281 } 4282 mutex_unlock(&cache_chain_mutex); 4283 if (res >= 0) 4284 res = count; 4285 return res; 4286} 4287 4288#ifdef CONFIG_DEBUG_SLAB_LEAK 4289 4290static void *leaks_start(struct seq_file *m, loff_t *pos) 4291{ 4292 loff_t n = *pos; 4293 struct list_head *p; 4294 4295 mutex_lock(&cache_chain_mutex); 4296 p = cache_chain.next; 4297 while (n--) { 4298 p = p->next; 4299 if (p == &cache_chain) 4300 return NULL; 4301 } 4302 return list_entry(p, struct kmem_cache, next); 4303} 4304 4305static inline int add_caller(unsigned long *n, unsigned long v) 4306{ 4307 unsigned long *p; 4308 int l; 4309 if (!v) 4310 return 1; 4311 l = n[1]; 4312 p = n + 2; 4313 while (l) { 4314 int i = l/2; 4315 unsigned long *q = p + 2 * i; 4316 if (*q == v) { 4317 q[1]++; 4318 return 1; 4319 } 4320 if (*q > v) { 4321 l = i; 4322 } else { 4323 p = q + 2; 4324 l -= i + 1; 4325 } 4326 } 4327 if (++n[1] == n[0]) 4328 return 0; 4329 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); 4330 p[0] = v; 4331 p[1] = 1; 4332 return 1; 4333} 4334 4335static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) 4336{ 4337 void *p; 4338 int i; 4339 if (n[0] == n[1]) 4340 return; 4341 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { 4342 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) 4343 continue; 4344 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) 4345 return; 4346 } 4347} 4348 4349static void show_symbol(struct seq_file *m, unsigned long address) 4350{ 4351#ifdef CONFIG_KALLSYMS 4352 char *modname; 4353 const char *name; 4354 unsigned long offset, size; 4355 char namebuf[KSYM_NAME_LEN+1]; 4356 4357 name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); 4358 4359 if (name) { 4360 seq_printf(m, "%s+%#lx/%#lx", name, offset, size); 4361 if (modname) 4362 seq_printf(m, " [%s]", modname); 4363 return; 4364 } 4365#endif 4366 seq_printf(m, "%p", (void *)address); 4367} 4368 4369static int leaks_show(struct seq_file *m, void *p) 4370{ 4371 struct kmem_cache *cachep = p; 4372 struct slab 
*slabp; 4373 struct kmem_list3 *l3; 4374 const char *name; 4375 unsigned long *n = m->private; 4376 int node; 4377 int i; 4378 4379 if (!(cachep->flags & SLAB_STORE_USER)) 4380 return 0; 4381 if (!(cachep->flags & SLAB_RED_ZONE)) 4382 return 0; 4383 4384 /* OK, we can do it */ 4385 4386 n[1] = 0; 4387 4388 for_each_online_node(node) { 4389 l3 = cachep->nodelists[node]; 4390 if (!l3) 4391 continue; 4392 4393 check_irq_on(); 4394 spin_lock_irq(&l3->list_lock); 4395 4396 list_for_each_entry(slabp, &l3->slabs_full, list) 4397 handle_slab(n, cachep, slabp); 4398 list_for_each_entry(slabp, &l3->slabs_partial, list) 4399 handle_slab(n, cachep, slabp); 4400 spin_unlock_irq(&l3->list_lock); 4401 } 4402 name = cachep->name; 4403 if (n[0] == n[1]) { 4404 /* Increase the buffer size */ 4405 mutex_unlock(&cache_chain_mutex); 4406 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); 4407 if (!m->private) { 4408 /* Too bad, we are really out */ 4409 m->private = n; 4410 mutex_lock(&cache_chain_mutex); 4411 return -ENOMEM; 4412 } 4413 *(unsigned long *)m->private = n[0] * 2; 4414 kfree(n); 4415 mutex_lock(&cache_chain_mutex); 4416 /* Now make sure this entry will be retried */ 4417 m->count = m->size; 4418 return 0; 4419 } 4420 for (i = 0; i < n[1]; i++) { 4421 seq_printf(m, "%s: %lu ", name, n[2*i+3]); 4422 show_symbol(m, n[2*i+2]); 4423 seq_putc(m, '\n'); 4424 } 4425 4426 return 0; 4427} 4428 4429const struct seq_operations slabstats_op = { 4430 .start = leaks_start, 4431 .next = s_next, 4432 .stop = s_stop, 4433 .show = leaks_show, 4434}; 4435#endif 4436#endif 4437 4438/** 4439 * ksize - get the actual amount of memory allocated for a given object 4440 * @objp: Pointer to the object 4441 * 4442 * kmalloc may internally round up allocations and return more memory 4443 * than requested. ksize() can be used to determine the actual amount of 4444 * memory allocated. The caller may use this additional memory, even though 4445 * a smaller amount of memory was initially specified with the kmalloc call. 4446 * The caller must guarantee that objp points to a valid object previously 4447 * allocated with either kmalloc() or kmem_cache_alloc(). The object 4448 * must not be freed during the duration of the call. 4449 */ 4450unsigned int ksize(const void *objp) 4451{ 4452 if (unlikely(objp == NULL)) 4453 return 0; 4454 4455 return obj_size(virt_to_cache(objp)); 4456} 4457
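/*
 * The notes below are illustrative sketches added for documentation only;
 * they are not part of the allocator and the example code is not built
 * (it is guarded by #if 0). Cache names and numbers such as "foo_cache"
 * are hypothetical.
 */

/*
 * Worked example for the enable_cpucache() sizing heuristic: a cache with
 * 512 byte objects falls into the "> 256" bucket, so limit = 54 and
 * batchcount = (54 + 1) / 2 = 27. On SMP the shared array is enabled with
 * shared = 8, and alloc_kmemlist() then sizes each node's shared array to
 * shared * batchcount = 8 * 27 = 216 entries.
 */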
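/*
 * Worked example for drain_array(): with limit = 120 and force = 0, an
 * untouched array loses at most (120 + 4) / 5 = 24 objects per reap pass;
 * if only 10 objects are available, (10 + 1) / 2 = 5 are freed instead.
 * An array that was touched since the last pass is skipped and merely has
 * its ->touched flag cleared.
 */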
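/*
 * Worked example for the free_limit formula in alloc_kmemlist(): on a node
 * with 4 cpus, a cache with batchcount = 27 and num = 8 objects per slab
 * gets free_limit = (1 + 4) * 27 + 8 = 143. A cache_reap() pass that finds
 * free_touched clear then asks drain_freelist() for up to
 * (143 + 5 * 8 - 1) / (5 * 8) = 4 slabs, i.e. roughly a fifth of that
 * limit per pass.
 */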
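/*
 * Illustrative /proc/slabinfo line (hypothetical cache and numbers) in the
 * version 2.1 layout produced by print_slabinfo_header() and s_show():
 *
 * foo_cache       1037   1048    512    8    1 : tunables   54   27    8 : slabdata    131    131      0
 *
 * i.e. 1037 active objects out of 1048, 512 bytes each, 8 objects per slab,
 * one page per slab, the tunables set by enable_cpucache()/slabinfo_write(),
 * and 131 slabs on the node lists with no objects held in shared arrays.
 */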
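/*
 * Illustrative use of the slabinfo_write() tuning interface (hypothetical
 * cache name):
 *
 *	echo "foo_cache 54 27 8" > /proc/slabinfo
 *
 * makes do_tune_cpucache() apply limit = 54, batchcount = 27 and shared = 8
 * to the cache named "foo_cache". Values that fail the sanity checks
 * (limit < 1, batchcount < 1, batchcount > limit or shared < 0) are
 * silently ignored, but the write itself still succeeds.
 */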
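/*
 * Illustrative layout of the caller table filled by add_caller() for
 * leaks_show(): n[0] is the table capacity, n[1] the number of entries in
 * use, followed by (call site, live object count) pairs sorted by address,
 * e.g.
 *
 *	n[0] = 64, n[1] = 2
 *	n[2] = 0xc01501a0, n[3] = 17	first caller, 17 live objects
 *	n[4] = 0xc0162bd4, n[5] = 3	second caller, 3 live objects
 *
 * add_caller() binary searches the pairs and either increments an existing
 * count or memmove()s the tail upwards to insert a new pair; it reports
 * failure when the table is full, which makes leaks_show() allocate a
 * buffer twice the size and retry the whole seq_file entry.
 */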
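/*
 * Minimal sketch (not built) of a ksize() user: kmalloc() may round the
 * request up to the size of the backing cache, typically the next
 * power-of-two kmalloc cache, and ksize() reports that full size so the
 * caller may use the slack. The helper name is hypothetical.
 */
#if 0
static void *example_alloc_with_slack(size_t len, size_t *usable)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;
	/* May exceed len, e.g. 512 bytes for a 300 byte request. */
	*usable = ksize(buf);
	return buf;
}
#endif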