slab.c revision e94a40c508dbdce872c79a13b35830c050d71e23
1/* 2 * linux/mm/slab.c 3 * Written by Mark Hemment, 1996/97. 4 * (markhe@nextd.demon.co.uk) 5 * 6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli 7 * 8 * Major cleanup, different bufctl logic, per-cpu arrays 9 * (c) 2000 Manfred Spraul 10 * 11 * Cleanup, make the head arrays unconditional, preparation for NUMA 12 * (c) 2002 Manfred Spraul 13 * 14 * An implementation of the Slab Allocator as described in outline in; 15 * UNIX Internals: The New Frontiers by Uresh Vahalia 16 * Pub: Prentice Hall ISBN 0-13-101908-2 17 * or with a little more detail in; 18 * The Slab Allocator: An Object-Caching Kernel Memory Allocator 19 * Jeff Bonwick (Sun Microsystems). 20 * Presented at: USENIX Summer 1994 Technical Conference 21 * 22 * The memory is organized in caches, one cache for each object type. 23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct) 24 * Each cache consists out of many slabs (they are small (usually one 25 * page long) and always contiguous), and each slab contains multiple 26 * initialized objects. 27 * 28 * This means, that your constructor is used only for newly allocated 29 * slabs and you must pass objects with the same intializations to 30 * kmem_cache_free. 31 * 32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM, 33 * normal). If you need a special memory type, then must create a new 34 * cache for that memory type. 35 * 36 * In order to reduce fragmentation, the slabs are sorted in 3 groups: 37 * full slabs with 0 free objects 38 * partial slabs 39 * empty slabs with no allocated objects 40 * 41 * If partial slabs exist, then new allocations come from these slabs, 42 * otherwise from empty slabs or new slabs are allocated. 43 * 44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache 45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs. 46 * 47 * Each cache has a short per-cpu head array, most allocs 48 * and frees go into that array, and if that array overflows, then 1/2 49 * of the entries in the array are given back into the global cache. 50 * The head array is strictly LIFO and should improve the cache hit rates. 51 * On SMP, it additionally reduces the spinlock operations. 52 * 53 * The c_cpuarray may not be read with enabled local interrupts - 54 * it's changed with a smp_call_function(). 55 * 56 * SMP synchronization: 57 * constructors and destructors are called without any locking. 58 * Several members in struct kmem_cache and struct slab never change, they 59 * are accessed without any locking. 60 * The per-cpu arrays are never accessed from the wrong cpu, no locking, 61 * and local interrupts are disabled so slab code is preempt-safe. 62 * The non-constant members are protected with a per-cache irq spinlock. 63 * 64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch 65 * in 2000 - many ideas in the current implementation are derived from 66 * his patch. 67 * 68 * Further notes from the original documentation: 69 * 70 * 11 April '97. Started multi-threading - markhe 71 * The global cache-chain is protected by the mutex 'cache_chain_mutex'. 72 * The sem is only needed when accessing/extending the cache-chain, which 73 * can never happen inside an interrupt (kmem_cache_create(), 74 * kmem_cache_shrink() and kmem_cache_reap()). 75 * 76 * At present, each engine can be growing a cache. This should be blocked. 77 * 78 * 15 March 2005. NUMA slab allocator. 79 * Shai Fultheim <shai@scalex86.org>. 80 * Shobhit Dayal <shobhit@calsoftinc.com> 81 * Alok N Kataria <alokk@calsoftinc.com> 82 * Christoph Lameter <christoph@lameter.com> 83 * 84 * Modified the slab allocator to be node aware on NUMA systems. 85 * Each node has its own list of partial, free and full slabs. 86 * All object allocations for a node occur from node specific slab lists. 87 */ 88 89#include <linux/slab.h> 90#include <linux/mm.h> 91#include <linux/poison.h> 92#include <linux/swap.h> 93#include <linux/cache.h> 94#include <linux/interrupt.h> 95#include <linux/init.h> 96#include <linux/compiler.h> 97#include <linux/cpuset.h> 98#include <linux/seq_file.h> 99#include <linux/notifier.h> 100#include <linux/kallsyms.h> 101#include <linux/cpu.h> 102#include <linux/sysctl.h> 103#include <linux/module.h> 104#include <linux/rcupdate.h> 105#include <linux/string.h> 106#include <linux/uaccess.h> 107#include <linux/nodemask.h> 108#include <linux/mempolicy.h> 109#include <linux/mutex.h> 110#include <linux/fault-inject.h> 111#include <linux/rtmutex.h> 112#include <linux/reciprocal_div.h> 113 114#include <asm/cacheflush.h> 115#include <asm/tlbflush.h> 116#include <asm/page.h> 117 118/* 119 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL, 120 * SLAB_RED_ZONE & SLAB_POISON. 121 * 0 for faster, smaller code (especially in the critical paths). 122 * 123 * STATS - 1 to collect stats for /proc/slabinfo. 124 * 0 for faster, smaller code (especially in the critical paths). 125 * 126 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) 127 */ 128 129#ifdef CONFIG_DEBUG_SLAB 130#define DEBUG 1 131#define STATS 1 132#define FORCED_DEBUG 1 133#else 134#define DEBUG 0 135#define STATS 0 136#define FORCED_DEBUG 0 137#endif 138 139/* Shouldn't this be in a header file somewhere? */ 140#define BYTES_PER_WORD sizeof(void *) 141 142#ifndef cache_line_size 143#define cache_line_size() L1_CACHE_BYTES 144#endif 145 146#ifndef ARCH_KMALLOC_MINALIGN 147/* 148 * Enforce a minimum alignment for the kmalloc caches. 149 * Usually, the kmalloc caches are cache_line_size() aligned, except when 150 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. 151 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 152 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that. 153 * Note that this flag disables some debug features. 154 */ 155#define ARCH_KMALLOC_MINALIGN 0 156#endif 157 158#ifndef ARCH_SLAB_MINALIGN 159/* 160 * Enforce a minimum alignment for all caches. 161 * Intended for archs that get misalignment faults even for BYTES_PER_WORD 162 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. 163 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables 164 * some debug features. 165 */ 166#define ARCH_SLAB_MINALIGN 0 167#endif 168 169#ifndef ARCH_KMALLOC_FLAGS 170#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 171#endif 172 173/* Legal flag mask for kmem_cache_create(). */ 174#if DEBUG 175# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \ 176 SLAB_POISON | SLAB_HWCACHE_ALIGN | \ 177 SLAB_CACHE_DMA | \ 178 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \ 179 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 180 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 181#else 182# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ 183 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \ 184 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 185 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 186#endif 187 188/* 189 * kmem_bufctl_t: 190 * 191 * Bufctl's are used for linking objs within a slab 192 * linked offsets. 193 * 194 * This implementation relies on "struct page" for locating the cache & 195 * slab an object belongs to. 196 * This allows the bufctl structure to be small (one int), but limits 197 * the number of objects a slab (not a cache) can contain when off-slab 198 * bufctls are used. The limit is the size of the largest general cache 199 * that does not use off-slab slabs. 200 * For 32bit archs with 4 kB pages, is this 56. 201 * This is not serious, as it is only for large objects, when it is unwise 202 * to have too many per slab. 203 * Note: This limit can be raised by introducing a general cache whose size 204 * is less than 512 (PAGE_SIZE<<3), but greater than 256. 205 */ 206 207typedef unsigned int kmem_bufctl_t; 208#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) 209#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) 210#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) 211#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) 212 213/* 214 * struct slab 215 * 216 * Manages the objs in a slab. Placed either at the beginning of mem allocated 217 * for a slab, or allocated from an general cache. 218 * Slabs are chained into three list: fully used, partial, fully free slabs. 219 */ 220struct slab { 221 struct list_head list; 222 unsigned long colouroff; 223 void *s_mem; /* including colour offset */ 224 unsigned int inuse; /* num of objs active in slab */ 225 kmem_bufctl_t free; 226 unsigned short nodeid; 227}; 228 229/* 230 * struct slab_rcu 231 * 232 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to 233 * arrange for kmem_freepages to be called via RCU. This is useful if 234 * we need to approach a kernel structure obliquely, from its address 235 * obtained without the usual locking. We can lock the structure to 236 * stabilize it and check it's still at the given address, only if we 237 * can be sure that the memory has not been meanwhile reused for some 238 * other kind of object (which our subsystem's lock might corrupt). 239 * 240 * rcu_read_lock before reading the address, then rcu_read_unlock after 241 * taking the spinlock within the structure expected at that address. 242 * 243 * We assume struct slab_rcu can overlay struct slab when destroying. 244 */ 245struct slab_rcu { 246 struct rcu_head head; 247 struct kmem_cache *cachep; 248 void *addr; 249}; 250 251/* 252 * struct array_cache 253 * 254 * Purpose: 255 * - LIFO ordering, to hand out cache-warm objects from _alloc 256 * - reduce the number of linked list operations 257 * - reduce spinlock operations 258 * 259 * The limit is stored in the per-cpu structure to reduce the data cache 260 * footprint. 261 * 262 */ 263struct array_cache { 264 unsigned int avail; 265 unsigned int limit; 266 unsigned int batchcount; 267 unsigned int touched; 268 spinlock_t lock; 269 void *entry[0]; /* 270 * Must have this definition in here for the proper 271 * alignment of array_cache. Also simplifies accessing 272 * the entries. 273 * [0] is for gcc 2.95. It should really be []. 274 */ 275}; 276 277/* 278 * bootstrap: The caches do not work without cpuarrays anymore, but the 279 * cpuarrays are allocated from the generic caches... 280 */ 281#define BOOT_CPUCACHE_ENTRIES 1 282struct arraycache_init { 283 struct array_cache cache; 284 void *entries[BOOT_CPUCACHE_ENTRIES]; 285}; 286 287/* 288 * The slab lists for all objects. 289 */ 290struct kmem_list3 { 291 struct list_head slabs_partial; /* partial list first, better asm code */ 292 struct list_head slabs_full; 293 struct list_head slabs_free; 294 unsigned long free_objects; 295 unsigned int free_limit; 296 unsigned int colour_next; /* Per-node cache coloring */ 297 spinlock_t list_lock; 298 struct array_cache *shared; /* shared per node */ 299 struct array_cache **alien; /* on other nodes */ 300 unsigned long next_reap; /* updated without locking */ 301 int free_touched; /* updated without locking */ 302}; 303 304/* 305 * Need this for bootstrapping a per node allocator. 306 */ 307#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) 308struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 309#define CACHE_CACHE 0 310#define SIZE_AC 1 311#define SIZE_L3 (1 + MAX_NUMNODES) 312 313static int drain_freelist(struct kmem_cache *cache, 314 struct kmem_list3 *l3, int tofree); 315static void free_block(struct kmem_cache *cachep, void **objpp, int len, 316 int node); 317static int enable_cpucache(struct kmem_cache *cachep); 318static void cache_reap(struct work_struct *unused); 319 320/* 321 * This function must be completely optimized away if a constant is passed to 322 * it. Mostly the same as what is in linux/slab.h except it returns an index. 323 */ 324static __always_inline int index_of(const size_t size) 325{ 326 extern void __bad_size(void); 327 328 if (__builtin_constant_p(size)) { 329 int i = 0; 330 331#define CACHE(x) \ 332 if (size <=x) \ 333 return i; \ 334 else \ 335 i++; 336#include "linux/kmalloc_sizes.h" 337#undef CACHE 338 __bad_size(); 339 } else 340 __bad_size(); 341 return 0; 342} 343 344static int slab_early_init = 1; 345 346#define INDEX_AC index_of(sizeof(struct arraycache_init)) 347#define INDEX_L3 index_of(sizeof(struct kmem_list3)) 348 349static void kmem_list3_init(struct kmem_list3 *parent) 350{ 351 INIT_LIST_HEAD(&parent->slabs_full); 352 INIT_LIST_HEAD(&parent->slabs_partial); 353 INIT_LIST_HEAD(&parent->slabs_free); 354 parent->shared = NULL; 355 parent->alien = NULL; 356 parent->colour_next = 0; 357 spin_lock_init(&parent->list_lock); 358 parent->free_objects = 0; 359 parent->free_touched = 0; 360} 361 362#define MAKE_LIST(cachep, listp, slab, nodeid) \ 363 do { \ 364 INIT_LIST_HEAD(listp); \ 365 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 366 } while (0) 367 368#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 369 do { \ 370 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 371 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 372 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 373 } while (0) 374 375/* 376 * struct kmem_cache 377 * 378 * manages a cache. 379 */ 380 381struct kmem_cache { 382/* 1) per-cpu data, touched during every alloc/free */ 383 struct array_cache *array[NR_CPUS]; 384/* 2) Cache tunables. Protected by cache_chain_mutex */ 385 unsigned int batchcount; 386 unsigned int limit; 387 unsigned int shared; 388 389 unsigned int buffer_size; 390 u32 reciprocal_buffer_size; 391/* 3) touched by every alloc & free from the backend */ 392 struct kmem_list3 *nodelists[MAX_NUMNODES]; 393 394 unsigned int flags; /* constant flags */ 395 unsigned int num; /* # of objs per slab */ 396 397/* 4) cache_grow/shrink */ 398 /* order of pgs per slab (2^n) */ 399 unsigned int gfporder; 400 401 /* force GFP flags, e.g. GFP_DMA */ 402 gfp_t gfpflags; 403 404 size_t colour; /* cache colouring range */ 405 unsigned int colour_off; /* colour offset */ 406 struct kmem_cache *slabp_cache; 407 unsigned int slab_size; 408 unsigned int dflags; /* dynamic flags */ 409 410 /* constructor func */ 411 void (*ctor) (void *, struct kmem_cache *, unsigned long); 412 413 /* de-constructor func */ 414 void (*dtor) (void *, struct kmem_cache *, unsigned long); 415 416/* 5) cache creation/removal */ 417 const char *name; 418 struct list_head next; 419 420/* 6) statistics */ 421#if STATS 422 unsigned long num_active; 423 unsigned long num_allocations; 424 unsigned long high_mark; 425 unsigned long grown; 426 unsigned long reaped; 427 unsigned long errors; 428 unsigned long max_freeable; 429 unsigned long node_allocs; 430 unsigned long node_frees; 431 unsigned long node_overflow; 432 atomic_t allochit; 433 atomic_t allocmiss; 434 atomic_t freehit; 435 atomic_t freemiss; 436#endif 437#if DEBUG 438 /* 439 * If debugging is enabled, then the allocator can add additional 440 * fields and/or padding to every object. buffer_size contains the total 441 * object size including these internal fields, the following two 442 * variables contain the offset to the user object and its size. 443 */ 444 int obj_offset; 445 int obj_size; 446#endif 447}; 448 449#define CFLGS_OFF_SLAB (0x80000000UL) 450#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 451 452#define BATCHREFILL_LIMIT 16 453/* 454 * Optimization question: fewer reaps means less probability for unnessary 455 * cpucache drain/refill cycles. 456 * 457 * OTOH the cpuarrays can contain lots of objects, 458 * which could lock up otherwise freeable slabs. 459 */ 460#define REAPTIMEOUT_CPUC (2*HZ) 461#define REAPTIMEOUT_LIST3 (4*HZ) 462 463#if STATS 464#define STATS_INC_ACTIVE(x) ((x)->num_active++) 465#define STATS_DEC_ACTIVE(x) ((x)->num_active--) 466#define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 467#define STATS_INC_GROWN(x) ((x)->grown++) 468#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 469#define STATS_SET_HIGH(x) \ 470 do { \ 471 if ((x)->num_active > (x)->high_mark) \ 472 (x)->high_mark = (x)->num_active; \ 473 } while (0) 474#define STATS_INC_ERR(x) ((x)->errors++) 475#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 476#define STATS_INC_NODEFREES(x) ((x)->node_frees++) 477#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 478#define STATS_SET_FREEABLE(x, i) \ 479 do { \ 480 if ((x)->max_freeable < i) \ 481 (x)->max_freeable = i; \ 482 } while (0) 483#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 484#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 485#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 486#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 487#else 488#define STATS_INC_ACTIVE(x) do { } while (0) 489#define STATS_DEC_ACTIVE(x) do { } while (0) 490#define STATS_INC_ALLOCED(x) do { } while (0) 491#define STATS_INC_GROWN(x) do { } while (0) 492#define STATS_ADD_REAPED(x,y) do { } while (0) 493#define STATS_SET_HIGH(x) do { } while (0) 494#define STATS_INC_ERR(x) do { } while (0) 495#define STATS_INC_NODEALLOCS(x) do { } while (0) 496#define STATS_INC_NODEFREES(x) do { } while (0) 497#define STATS_INC_ACOVERFLOW(x) do { } while (0) 498#define STATS_SET_FREEABLE(x, i) do { } while (0) 499#define STATS_INC_ALLOCHIT(x) do { } while (0) 500#define STATS_INC_ALLOCMISS(x) do { } while (0) 501#define STATS_INC_FREEHIT(x) do { } while (0) 502#define STATS_INC_FREEMISS(x) do { } while (0) 503#endif 504 505#if DEBUG 506 507/* 508 * memory layout of objects: 509 * 0 : objp 510 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 511 * the end of an object is aligned with the end of the real 512 * allocation. Catches writes behind the end of the allocation. 513 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 514 * redzone word. 515 * cachep->obj_offset: The real object. 516 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 517 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address 518 * [BYTES_PER_WORD long] 519 */ 520static int obj_offset(struct kmem_cache *cachep) 521{ 522 return cachep->obj_offset; 523} 524 525static int obj_size(struct kmem_cache *cachep) 526{ 527 return cachep->obj_size; 528} 529 530static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 531{ 532 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 533 return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD); 534} 535 536static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 537{ 538 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 539 if (cachep->flags & SLAB_STORE_USER) 540 return (unsigned long *)(objp + cachep->buffer_size - 541 2 * BYTES_PER_WORD); 542 return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD); 543} 544 545static void **dbg_userword(struct kmem_cache *cachep, void *objp) 546{ 547 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 548 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); 549} 550 551#else 552 553#define obj_offset(x) 0 554#define obj_size(cachep) (cachep->buffer_size) 555#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 556#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 557#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 558 559#endif 560 561/* 562 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp 563 * order. 564 */ 565#if defined(CONFIG_LARGE_ALLOCS) 566#define MAX_OBJ_ORDER 13 /* up to 32Mb */ 567#define MAX_GFP_ORDER 13 /* up to 32Mb */ 568#elif defined(CONFIG_MMU) 569#define MAX_OBJ_ORDER 5 /* 32 pages */ 570#define MAX_GFP_ORDER 5 /* 32 pages */ 571#else 572#define MAX_OBJ_ORDER 8 /* up to 1Mb */ 573#define MAX_GFP_ORDER 8 /* up to 1Mb */ 574#endif 575 576/* 577 * Do not go above this order unless 0 objects fit into the slab. 578 */ 579#define BREAK_GFP_ORDER_HI 1 580#define BREAK_GFP_ORDER_LO 0 581static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; 582 583/* 584 * Functions for storing/retrieving the cachep and or slab from the page 585 * allocator. These are used to find the slab an obj belongs to. With kfree(), 586 * these are used to find the cache which an obj belongs to. 587 */ 588static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 589{ 590 page->lru.next = (struct list_head *)cache; 591} 592 593static inline struct kmem_cache *page_get_cache(struct page *page) 594{ 595 if (unlikely(PageCompound(page))) 596 page = (struct page *)page_private(page); 597 BUG_ON(!PageSlab(page)); 598 return (struct kmem_cache *)page->lru.next; 599} 600 601static inline void page_set_slab(struct page *page, struct slab *slab) 602{ 603 page->lru.prev = (struct list_head *)slab; 604} 605 606static inline struct slab *page_get_slab(struct page *page) 607{ 608 if (unlikely(PageCompound(page))) 609 page = (struct page *)page_private(page); 610 BUG_ON(!PageSlab(page)); 611 return (struct slab *)page->lru.prev; 612} 613 614static inline struct kmem_cache *virt_to_cache(const void *obj) 615{ 616 struct page *page = virt_to_page(obj); 617 return page_get_cache(page); 618} 619 620static inline struct slab *virt_to_slab(const void *obj) 621{ 622 struct page *page = virt_to_page(obj); 623 return page_get_slab(page); 624} 625 626static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, 627 unsigned int idx) 628{ 629 return slab->s_mem + cache->buffer_size * idx; 630} 631 632/* 633 * We want to avoid an expensive divide : (offset / cache->buffer_size) 634 * Using the fact that buffer_size is a constant for a particular cache, 635 * we can replace (offset / cache->buffer_size) by 636 * reciprocal_divide(offset, cache->reciprocal_buffer_size) 637 */ 638static inline unsigned int obj_to_index(const struct kmem_cache *cache, 639 const struct slab *slab, void *obj) 640{ 641 u32 offset = (obj - slab->s_mem); 642 return reciprocal_divide(offset, cache->reciprocal_buffer_size); 643} 644 645/* 646 * These are the default caches for kmalloc. Custom caches can have other sizes. 647 */ 648struct cache_sizes malloc_sizes[] = { 649#define CACHE(x) { .cs_size = (x) }, 650#include <linux/kmalloc_sizes.h> 651 CACHE(ULONG_MAX) 652#undef CACHE 653}; 654EXPORT_SYMBOL(malloc_sizes); 655 656/* Must match cache_sizes above. Out of line to keep cache footprint low. */ 657struct cache_names { 658 char *name; 659 char *name_dma; 660}; 661 662static struct cache_names __initdata cache_names[] = { 663#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 664#include <linux/kmalloc_sizes.h> 665 {NULL,} 666#undef CACHE 667}; 668 669static struct arraycache_init initarray_cache __initdata = 670 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 671static struct arraycache_init initarray_generic = 672 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 673 674/* internal cache of cache description objs */ 675static struct kmem_cache cache_cache = { 676 .batchcount = 1, 677 .limit = BOOT_CPUCACHE_ENTRIES, 678 .shared = 1, 679 .buffer_size = sizeof(struct kmem_cache), 680 .name = "kmem_cache", 681#if DEBUG 682 .obj_size = sizeof(struct kmem_cache), 683#endif 684}; 685 686#define BAD_ALIEN_MAGIC 0x01020304ul 687 688#ifdef CONFIG_LOCKDEP 689 690/* 691 * Slab sometimes uses the kmalloc slabs to store the slab headers 692 * for other slabs "off slab". 693 * The locking for this is tricky in that it nests within the locks 694 * of all other slabs in a few places; to deal with this special 695 * locking we put on-slab caches into a separate lock-class. 696 * 697 * We set lock class for alien array caches which are up during init. 698 * The lock annotation will be lost if all cpus of a node goes down and 699 * then comes back up during hotplug 700 */ 701static struct lock_class_key on_slab_l3_key; 702static struct lock_class_key on_slab_alc_key; 703 704static inline void init_lock_keys(void) 705 706{ 707 int q; 708 struct cache_sizes *s = malloc_sizes; 709 710 while (s->cs_size != ULONG_MAX) { 711 for_each_node(q) { 712 struct array_cache **alc; 713 int r; 714 struct kmem_list3 *l3 = s->cs_cachep->nodelists[q]; 715 if (!l3 || OFF_SLAB(s->cs_cachep)) 716 continue; 717 lockdep_set_class(&l3->list_lock, &on_slab_l3_key); 718 alc = l3->alien; 719 /* 720 * FIXME: This check for BAD_ALIEN_MAGIC 721 * should go away when common slab code is taught to 722 * work even without alien caches. 723 * Currently, non NUMA code returns BAD_ALIEN_MAGIC 724 * for alloc_alien_cache, 725 */ 726 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) 727 continue; 728 for_each_node(r) { 729 if (alc[r]) 730 lockdep_set_class(&alc[r]->lock, 731 &on_slab_alc_key); 732 } 733 } 734 s++; 735 } 736} 737#else 738static inline void init_lock_keys(void) 739{ 740} 741#endif 742 743/* 744 * 1. Guard access to the cache-chain. 745 * 2. Protect sanity of cpu_online_map against cpu hotplug events 746 */ 747static DEFINE_MUTEX(cache_chain_mutex); 748static struct list_head cache_chain; 749 750/* 751 * chicken and egg problem: delay the per-cpu array allocation 752 * until the general caches are up. 753 */ 754static enum { 755 NONE, 756 PARTIAL_AC, 757 PARTIAL_L3, 758 FULL 759} g_cpucache_up; 760 761/* 762 * used by boot code to determine if it can use slab based allocator 763 */ 764int slab_is_available(void) 765{ 766 return g_cpucache_up == FULL; 767} 768 769static DEFINE_PER_CPU(struct delayed_work, reap_work); 770 771static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 772{ 773 return cachep->array[smp_processor_id()]; 774} 775 776static inline struct kmem_cache *__find_general_cachep(size_t size, 777 gfp_t gfpflags) 778{ 779 struct cache_sizes *csizep = malloc_sizes; 780 781#if DEBUG 782 /* This happens if someone tries to call 783 * kmem_cache_create(), or __kmalloc(), before 784 * the generic caches are initialized. 785 */ 786 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 787#endif 788 while (size > csizep->cs_size) 789 csizep++; 790 791 /* 792 * Really subtle: The last entry with cs->cs_size==ULONG_MAX 793 * has cs_{dma,}cachep==NULL. Thus no special case 794 * for large kmalloc calls required. 795 */ 796#ifdef CONFIG_ZONE_DMA 797 if (unlikely(gfpflags & GFP_DMA)) 798 return csizep->cs_dmacachep; 799#endif 800 return csizep->cs_cachep; 801} 802 803static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 804{ 805 return __find_general_cachep(size, gfpflags); 806} 807 808static size_t slab_mgmt_size(size_t nr_objs, size_t align) 809{ 810 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 811} 812 813/* 814 * Calculate the number of objects and left-over bytes for a given buffer size. 815 */ 816static void cache_estimate(unsigned long gfporder, size_t buffer_size, 817 size_t align, int flags, size_t *left_over, 818 unsigned int *num) 819{ 820 int nr_objs; 821 size_t mgmt_size; 822 size_t slab_size = PAGE_SIZE << gfporder; 823 824 /* 825 * The slab management structure can be either off the slab or 826 * on it. For the latter case, the memory allocated for a 827 * slab is used for: 828 * 829 * - The struct slab 830 * - One kmem_bufctl_t for each object 831 * - Padding to respect alignment of @align 832 * - @buffer_size bytes for each object 833 * 834 * If the slab management structure is off the slab, then the 835 * alignment will already be calculated into the size. Because 836 * the slabs are all pages aligned, the objects will be at the 837 * correct alignment when allocated. 838 */ 839 if (flags & CFLGS_OFF_SLAB) { 840 mgmt_size = 0; 841 nr_objs = slab_size / buffer_size; 842 843 if (nr_objs > SLAB_LIMIT) 844 nr_objs = SLAB_LIMIT; 845 } else { 846 /* 847 * Ignore padding for the initial guess. The padding 848 * is at most @align-1 bytes, and @buffer_size is at 849 * least @align. In the worst case, this result will 850 * be one greater than the number of objects that fit 851 * into the memory allocation when taking the padding 852 * into account. 853 */ 854 nr_objs = (slab_size - sizeof(struct slab)) / 855 (buffer_size + sizeof(kmem_bufctl_t)); 856 857 /* 858 * This calculated number will be either the right 859 * amount, or one greater than what we want. 860 */ 861 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size 862 > slab_size) 863 nr_objs--; 864 865 if (nr_objs > SLAB_LIMIT) 866 nr_objs = SLAB_LIMIT; 867 868 mgmt_size = slab_mgmt_size(nr_objs, align); 869 } 870 *num = nr_objs; 871 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 872} 873 874#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) 875 876static void __slab_error(const char *function, struct kmem_cache *cachep, 877 char *msg) 878{ 879 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 880 function, cachep->name, msg); 881 dump_stack(); 882} 883 884/* 885 * By default on NUMA we use alien caches to stage the freeing of 886 * objects allocated from other nodes. This causes massive memory 887 * inefficiencies when using fake NUMA setup to split memory into a 888 * large number of small nodes, so it can be disabled on the command 889 * line 890 */ 891 892static int use_alien_caches __read_mostly = 1; 893static int __init noaliencache_setup(char *s) 894{ 895 use_alien_caches = 0; 896 return 1; 897} 898__setup("noaliencache", noaliencache_setup); 899 900#ifdef CONFIG_NUMA 901/* 902 * Special reaping functions for NUMA systems called from cache_reap(). 903 * These take care of doing round robin flushing of alien caches (containing 904 * objects freed on different nodes from which they were allocated) and the 905 * flushing of remote pcps by calling drain_node_pages. 906 */ 907static DEFINE_PER_CPU(unsigned long, reap_node); 908 909static void init_reap_node(int cpu) 910{ 911 int node; 912 913 node = next_node(cpu_to_node(cpu), node_online_map); 914 if (node == MAX_NUMNODES) 915 node = first_node(node_online_map); 916 917 per_cpu(reap_node, cpu) = node; 918} 919 920static void next_reap_node(void) 921{ 922 int node = __get_cpu_var(reap_node); 923 924 /* 925 * Also drain per cpu pages on remote zones 926 */ 927 if (node != numa_node_id()) 928 drain_node_pages(node); 929 930 node = next_node(node, node_online_map); 931 if (unlikely(node >= MAX_NUMNODES)) 932 node = first_node(node_online_map); 933 __get_cpu_var(reap_node) = node; 934} 935 936#else 937#define init_reap_node(cpu) do { } while (0) 938#define next_reap_node(void) do { } while (0) 939#endif 940 941/* 942 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 943 * via the workqueue/eventd. 944 * Add the CPU number into the expiration time to minimize the possibility of 945 * the CPUs getting into lockstep and contending for the global cache chain 946 * lock. 947 */ 948static void __devinit start_cpu_timer(int cpu) 949{ 950 struct delayed_work *reap_work = &per_cpu(reap_work, cpu); 951 952 /* 953 * When this gets called from do_initcalls via cpucache_init(), 954 * init_workqueues() has already run, so keventd will be setup 955 * at that time. 956 */ 957 if (keventd_up() && reap_work->work.func == NULL) { 958 init_reap_node(cpu); 959 INIT_DELAYED_WORK(reap_work, cache_reap); 960 schedule_delayed_work_on(cpu, reap_work, 961 __round_jiffies_relative(HZ, cpu)); 962 } 963} 964 965static struct array_cache *alloc_arraycache(int node, int entries, 966 int batchcount) 967{ 968 int memsize = sizeof(void *) * entries + sizeof(struct array_cache); 969 struct array_cache *nc = NULL; 970 971 nc = kmalloc_node(memsize, GFP_KERNEL, node); 972 if (nc) { 973 nc->avail = 0; 974 nc->limit = entries; 975 nc->batchcount = batchcount; 976 nc->touched = 0; 977 spin_lock_init(&nc->lock); 978 } 979 return nc; 980} 981 982/* 983 * Transfer objects in one arraycache to another. 984 * Locking must be handled by the caller. 985 * 986 * Return the number of entries transferred. 987 */ 988static int transfer_objects(struct array_cache *to, 989 struct array_cache *from, unsigned int max) 990{ 991 /* Figure out how many entries to transfer */ 992 int nr = min(min(from->avail, max), to->limit - to->avail); 993 994 if (!nr) 995 return 0; 996 997 memcpy(to->entry + to->avail, from->entry + from->avail -nr, 998 sizeof(void *) *nr); 999 1000 from->avail -= nr; 1001 to->avail += nr; 1002 to->touched = 1; 1003 return nr; 1004} 1005 1006#ifndef CONFIG_NUMA 1007 1008#define drain_alien_cache(cachep, alien) do { } while (0) 1009#define reap_alien(cachep, l3) do { } while (0) 1010 1011static inline struct array_cache **alloc_alien_cache(int node, int limit) 1012{ 1013 return (struct array_cache **)BAD_ALIEN_MAGIC; 1014} 1015 1016static inline void free_alien_cache(struct array_cache **ac_ptr) 1017{ 1018} 1019 1020static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1021{ 1022 return 0; 1023} 1024 1025static inline void *alternate_node_alloc(struct kmem_cache *cachep, 1026 gfp_t flags) 1027{ 1028 return NULL; 1029} 1030 1031static inline void *____cache_alloc_node(struct kmem_cache *cachep, 1032 gfp_t flags, int nodeid) 1033{ 1034 return NULL; 1035} 1036 1037#else /* CONFIG_NUMA */ 1038 1039static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); 1040static void *alternate_node_alloc(struct kmem_cache *, gfp_t); 1041 1042static struct array_cache **alloc_alien_cache(int node, int limit) 1043{ 1044 struct array_cache **ac_ptr; 1045 int memsize = sizeof(void *) * nr_node_ids; 1046 int i; 1047 1048 if (limit > 1) 1049 limit = 12; 1050 ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node); 1051 if (ac_ptr) { 1052 for_each_node(i) { 1053 if (i == node || !node_online(i)) { 1054 ac_ptr[i] = NULL; 1055 continue; 1056 } 1057 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); 1058 if (!ac_ptr[i]) { 1059 for (i--; i <= 0; i--) 1060 kfree(ac_ptr[i]); 1061 kfree(ac_ptr); 1062 return NULL; 1063 } 1064 } 1065 } 1066 return ac_ptr; 1067} 1068 1069static void free_alien_cache(struct array_cache **ac_ptr) 1070{ 1071 int i; 1072 1073 if (!ac_ptr) 1074 return; 1075 for_each_node(i) 1076 kfree(ac_ptr[i]); 1077 kfree(ac_ptr); 1078} 1079 1080static void __drain_alien_cache(struct kmem_cache *cachep, 1081 struct array_cache *ac, int node) 1082{ 1083 struct kmem_list3 *rl3 = cachep->nodelists[node]; 1084 1085 if (ac->avail) { 1086 spin_lock(&rl3->list_lock); 1087 /* 1088 * Stuff objects into the remote nodes shared array first. 1089 * That way we could avoid the overhead of putting the objects 1090 * into the free lists and getting them back later. 1091 */ 1092 if (rl3->shared) 1093 transfer_objects(rl3->shared, ac, ac->limit); 1094 1095 free_block(cachep, ac->entry, ac->avail, node); 1096 ac->avail = 0; 1097 spin_unlock(&rl3->list_lock); 1098 } 1099} 1100 1101/* 1102 * Called from cache_reap() to regularly drain alien caches round robin. 1103 */ 1104static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 1105{ 1106 int node = __get_cpu_var(reap_node); 1107 1108 if (l3->alien) { 1109 struct array_cache *ac = l3->alien[node]; 1110 1111 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { 1112 __drain_alien_cache(cachep, ac, node); 1113 spin_unlock_irq(&ac->lock); 1114 } 1115 } 1116} 1117 1118static void drain_alien_cache(struct kmem_cache *cachep, 1119 struct array_cache **alien) 1120{ 1121 int i = 0; 1122 struct array_cache *ac; 1123 unsigned long flags; 1124 1125 for_each_online_node(i) { 1126 ac = alien[i]; 1127 if (ac) { 1128 spin_lock_irqsave(&ac->lock, flags); 1129 __drain_alien_cache(cachep, ac, i); 1130 spin_unlock_irqrestore(&ac->lock, flags); 1131 } 1132 } 1133} 1134 1135static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1136{ 1137 struct slab *slabp = virt_to_slab(objp); 1138 int nodeid = slabp->nodeid; 1139 struct kmem_list3 *l3; 1140 struct array_cache *alien = NULL; 1141 int node; 1142 1143 node = numa_node_id(); 1144 1145 /* 1146 * Make sure we are not freeing a object from another node to the array 1147 * cache on this cpu. 1148 */ 1149 if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches)) 1150 return 0; 1151 1152 l3 = cachep->nodelists[node]; 1153 STATS_INC_NODEFREES(cachep); 1154 if (l3->alien && l3->alien[nodeid]) { 1155 alien = l3->alien[nodeid]; 1156 spin_lock(&alien->lock); 1157 if (unlikely(alien->avail == alien->limit)) { 1158 STATS_INC_ACOVERFLOW(cachep); 1159 __drain_alien_cache(cachep, alien, nodeid); 1160 } 1161 alien->entry[alien->avail++] = objp; 1162 spin_unlock(&alien->lock); 1163 } else { 1164 spin_lock(&(cachep->nodelists[nodeid])->list_lock); 1165 free_block(cachep, &objp, 1, nodeid); 1166 spin_unlock(&(cachep->nodelists[nodeid])->list_lock); 1167 } 1168 return 1; 1169} 1170#endif 1171 1172static int __cpuinit cpuup_callback(struct notifier_block *nfb, 1173 unsigned long action, void *hcpu) 1174{ 1175 long cpu = (long)hcpu; 1176 struct kmem_cache *cachep; 1177 struct kmem_list3 *l3 = NULL; 1178 int node = cpu_to_node(cpu); 1179 int memsize = sizeof(struct kmem_list3); 1180 1181 switch (action) { 1182 case CPU_UP_PREPARE: 1183 mutex_lock(&cache_chain_mutex); 1184 /* 1185 * We need to do this right in the beginning since 1186 * alloc_arraycache's are going to use this list. 1187 * kmalloc_node allows us to add the slab to the right 1188 * kmem_list3 and not this cpu's kmem_list3 1189 */ 1190 1191 list_for_each_entry(cachep, &cache_chain, next) { 1192 /* 1193 * Set up the size64 kmemlist for cpu before we can 1194 * begin anything. Make sure some other cpu on this 1195 * node has not already allocated this 1196 */ 1197 if (!cachep->nodelists[node]) { 1198 l3 = kmalloc_node(memsize, GFP_KERNEL, node); 1199 if (!l3) 1200 goto bad; 1201 kmem_list3_init(l3); 1202 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1203 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1204 1205 /* 1206 * The l3s don't come and go as CPUs come and 1207 * go. cache_chain_mutex is sufficient 1208 * protection here. 1209 */ 1210 cachep->nodelists[node] = l3; 1211 } 1212 1213 spin_lock_irq(&cachep->nodelists[node]->list_lock); 1214 cachep->nodelists[node]->free_limit = 1215 (1 + nr_cpus_node(node)) * 1216 cachep->batchcount + cachep->num; 1217 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 1218 } 1219 1220 /* 1221 * Now we can go ahead with allocating the shared arrays and 1222 * array caches 1223 */ 1224 list_for_each_entry(cachep, &cache_chain, next) { 1225 struct array_cache *nc; 1226 struct array_cache *shared; 1227 struct array_cache **alien = NULL; 1228 1229 nc = alloc_arraycache(node, cachep->limit, 1230 cachep->batchcount); 1231 if (!nc) 1232 goto bad; 1233 shared = alloc_arraycache(node, 1234 cachep->shared * cachep->batchcount, 1235 0xbaadf00d); 1236 if (!shared) 1237 goto bad; 1238 1239 if (use_alien_caches) { 1240 alien = alloc_alien_cache(node, cachep->limit); 1241 if (!alien) 1242 goto bad; 1243 } 1244 cachep->array[cpu] = nc; 1245 l3 = cachep->nodelists[node]; 1246 BUG_ON(!l3); 1247 1248 spin_lock_irq(&l3->list_lock); 1249 if (!l3->shared) { 1250 /* 1251 * We are serialised from CPU_DEAD or 1252 * CPU_UP_CANCELLED by the cpucontrol lock 1253 */ 1254 l3->shared = shared; 1255 shared = NULL; 1256 } 1257#ifdef CONFIG_NUMA 1258 if (!l3->alien) { 1259 l3->alien = alien; 1260 alien = NULL; 1261 } 1262#endif 1263 spin_unlock_irq(&l3->list_lock); 1264 kfree(shared); 1265 free_alien_cache(alien); 1266 } 1267 break; 1268 case CPU_ONLINE: 1269 mutex_unlock(&cache_chain_mutex); 1270 start_cpu_timer(cpu); 1271 break; 1272#ifdef CONFIG_HOTPLUG_CPU 1273 case CPU_DOWN_PREPARE: 1274 mutex_lock(&cache_chain_mutex); 1275 break; 1276 case CPU_DOWN_FAILED: 1277 mutex_unlock(&cache_chain_mutex); 1278 break; 1279 case CPU_DEAD: 1280 /* 1281 * Even if all the cpus of a node are down, we don't free the 1282 * kmem_list3 of any cache. This to avoid a race between 1283 * cpu_down, and a kmalloc allocation from another cpu for 1284 * memory from the node of the cpu going down. The list3 1285 * structure is usually allocated from kmem_cache_create() and 1286 * gets destroyed at kmem_cache_destroy(). 1287 */ 1288 /* fall thru */ 1289#endif 1290 case CPU_UP_CANCELED: 1291 list_for_each_entry(cachep, &cache_chain, next) { 1292 struct array_cache *nc; 1293 struct array_cache *shared; 1294 struct array_cache **alien; 1295 cpumask_t mask; 1296 1297 mask = node_to_cpumask(node); 1298 /* cpu is dead; no one can alloc from it. */ 1299 nc = cachep->array[cpu]; 1300 cachep->array[cpu] = NULL; 1301 l3 = cachep->nodelists[node]; 1302 1303 if (!l3) 1304 goto free_array_cache; 1305 1306 spin_lock_irq(&l3->list_lock); 1307 1308 /* Free limit for this kmem_list3 */ 1309 l3->free_limit -= cachep->batchcount; 1310 if (nc) 1311 free_block(cachep, nc->entry, nc->avail, node); 1312 1313 if (!cpus_empty(mask)) { 1314 spin_unlock_irq(&l3->list_lock); 1315 goto free_array_cache; 1316 } 1317 1318 shared = l3->shared; 1319 if (shared) { 1320 free_block(cachep, l3->shared->entry, 1321 l3->shared->avail, node); 1322 l3->shared = NULL; 1323 } 1324 1325 alien = l3->alien; 1326 l3->alien = NULL; 1327 1328 spin_unlock_irq(&l3->list_lock); 1329 1330 kfree(shared); 1331 if (alien) { 1332 drain_alien_cache(cachep, alien); 1333 free_alien_cache(alien); 1334 } 1335free_array_cache: 1336 kfree(nc); 1337 } 1338 /* 1339 * In the previous loop, all the objects were freed to 1340 * the respective cache's slabs, now we can go ahead and 1341 * shrink each nodelist to its limit. 1342 */ 1343 list_for_each_entry(cachep, &cache_chain, next) { 1344 l3 = cachep->nodelists[node]; 1345 if (!l3) 1346 continue; 1347 drain_freelist(cachep, l3, l3->free_objects); 1348 } 1349 mutex_unlock(&cache_chain_mutex); 1350 break; 1351 } 1352 return NOTIFY_OK; 1353bad: 1354 return NOTIFY_BAD; 1355} 1356 1357static struct notifier_block __cpuinitdata cpucache_notifier = { 1358 &cpuup_callback, NULL, 0 1359}; 1360 1361/* 1362 * swap the static kmem_list3 with kmalloced memory 1363 */ 1364static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, 1365 int nodeid) 1366{ 1367 struct kmem_list3 *ptr; 1368 1369 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); 1370 BUG_ON(!ptr); 1371 1372 local_irq_disable(); 1373 memcpy(ptr, list, sizeof(struct kmem_list3)); 1374 /* 1375 * Do not assume that spinlocks can be initialized via memcpy: 1376 */ 1377 spin_lock_init(&ptr->list_lock); 1378 1379 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1380 cachep->nodelists[nodeid] = ptr; 1381 local_irq_enable(); 1382} 1383 1384/* 1385 * Initialisation. Called after the page allocator have been initialised and 1386 * before smp_init(). 1387 */ 1388void __init kmem_cache_init(void) 1389{ 1390 size_t left_over; 1391 struct cache_sizes *sizes; 1392 struct cache_names *names; 1393 int i; 1394 int order; 1395 int node; 1396 1397 for (i = 0; i < NUM_INIT_LISTS; i++) { 1398 kmem_list3_init(&initkmem_list3[i]); 1399 if (i < MAX_NUMNODES) 1400 cache_cache.nodelists[i] = NULL; 1401 } 1402 1403 /* 1404 * Fragmentation resistance on low memory - only use bigger 1405 * page orders on machines with more than 32MB of memory. 1406 */ 1407 if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1408 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1409 1410 /* Bootstrap is tricky, because several objects are allocated 1411 * from caches that do not exist yet: 1412 * 1) initialize the cache_cache cache: it contains the struct 1413 * kmem_cache structures of all caches, except cache_cache itself: 1414 * cache_cache is statically allocated. 1415 * Initially an __init data area is used for the head array and the 1416 * kmem_list3 structures, it's replaced with a kmalloc allocated 1417 * array at the end of the bootstrap. 1418 * 2) Create the first kmalloc cache. 1419 * The struct kmem_cache for the new cache is allocated normally. 1420 * An __init data area is used for the head array. 1421 * 3) Create the remaining kmalloc caches, with minimally sized 1422 * head arrays. 1423 * 4) Replace the __init data head arrays for cache_cache and the first 1424 * kmalloc cache with kmalloc allocated arrays. 1425 * 5) Replace the __init data for kmem_list3 for cache_cache and 1426 * the other cache's with kmalloc allocated memory. 1427 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1428 */ 1429 1430 node = numa_node_id(); 1431 1432 /* 1) create the cache_cache */ 1433 INIT_LIST_HEAD(&cache_chain); 1434 list_add(&cache_cache.next, &cache_chain); 1435 cache_cache.colour_off = cache_line_size(); 1436 cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1437 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE]; 1438 1439 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1440 cache_line_size()); 1441 cache_cache.reciprocal_buffer_size = 1442 reciprocal_value(cache_cache.buffer_size); 1443 1444 for (order = 0; order < MAX_ORDER; order++) { 1445 cache_estimate(order, cache_cache.buffer_size, 1446 cache_line_size(), 0, &left_over, &cache_cache.num); 1447 if (cache_cache.num) 1448 break; 1449 } 1450 BUG_ON(!cache_cache.num); 1451 cache_cache.gfporder = order; 1452 cache_cache.colour = left_over / cache_cache.colour_off; 1453 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1454 sizeof(struct slab), cache_line_size()); 1455 1456 /* 2+3) create the kmalloc caches */ 1457 sizes = malloc_sizes; 1458 names = cache_names; 1459 1460 /* 1461 * Initialize the caches that provide memory for the array cache and the 1462 * kmem_list3 structures first. Without this, further allocations will 1463 * bug. 1464 */ 1465 1466 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, 1467 sizes[INDEX_AC].cs_size, 1468 ARCH_KMALLOC_MINALIGN, 1469 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1470 NULL, NULL); 1471 1472 if (INDEX_AC != INDEX_L3) { 1473 sizes[INDEX_L3].cs_cachep = 1474 kmem_cache_create(names[INDEX_L3].name, 1475 sizes[INDEX_L3].cs_size, 1476 ARCH_KMALLOC_MINALIGN, 1477 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1478 NULL, NULL); 1479 } 1480 1481 slab_early_init = 0; 1482 1483 while (sizes->cs_size != ULONG_MAX) { 1484 /* 1485 * For performance, all the general caches are L1 aligned. 1486 * This should be particularly beneficial on SMP boxes, as it 1487 * eliminates "false sharing". 1488 * Note for systems short on memory removing the alignment will 1489 * allow tighter packing of the smaller caches. 1490 */ 1491 if (!sizes->cs_cachep) { 1492 sizes->cs_cachep = kmem_cache_create(names->name, 1493 sizes->cs_size, 1494 ARCH_KMALLOC_MINALIGN, 1495 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1496 NULL, NULL); 1497 } 1498#ifdef CONFIG_ZONE_DMA 1499 sizes->cs_dmacachep = kmem_cache_create( 1500 names->name_dma, 1501 sizes->cs_size, 1502 ARCH_KMALLOC_MINALIGN, 1503 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1504 SLAB_PANIC, 1505 NULL, NULL); 1506#endif 1507 sizes++; 1508 names++; 1509 } 1510 /* 4) Replace the bootstrap head arrays */ 1511 { 1512 struct array_cache *ptr; 1513 1514 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1515 1516 local_irq_disable(); 1517 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1518 memcpy(ptr, cpu_cache_get(&cache_cache), 1519 sizeof(struct arraycache_init)); 1520 /* 1521 * Do not assume that spinlocks can be initialized via memcpy: 1522 */ 1523 spin_lock_init(&ptr->lock); 1524 1525 cache_cache.array[smp_processor_id()] = ptr; 1526 local_irq_enable(); 1527 1528 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1529 1530 local_irq_disable(); 1531 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1532 != &initarray_generic.cache); 1533 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1534 sizeof(struct arraycache_init)); 1535 /* 1536 * Do not assume that spinlocks can be initialized via memcpy: 1537 */ 1538 spin_lock_init(&ptr->lock); 1539 1540 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1541 ptr; 1542 local_irq_enable(); 1543 } 1544 /* 5) Replace the bootstrap kmem_list3's */ 1545 { 1546 int nid; 1547 1548 /* Replace the static kmem_list3 structures for the boot cpu */ 1549 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node); 1550 1551 for_each_online_node(nid) { 1552 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1553 &initkmem_list3[SIZE_AC + nid], nid); 1554 1555 if (INDEX_AC != INDEX_L3) { 1556 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1557 &initkmem_list3[SIZE_L3 + nid], nid); 1558 } 1559 } 1560 } 1561 1562 /* 6) resize the head arrays to their final sizes */ 1563 { 1564 struct kmem_cache *cachep; 1565 mutex_lock(&cache_chain_mutex); 1566 list_for_each_entry(cachep, &cache_chain, next) 1567 if (enable_cpucache(cachep)) 1568 BUG(); 1569 mutex_unlock(&cache_chain_mutex); 1570 } 1571 1572 /* Annotate slab for lockdep -- annotate the malloc caches */ 1573 init_lock_keys(); 1574 1575 1576 /* Done! */ 1577 g_cpucache_up = FULL; 1578 1579 /* 1580 * Register a cpu startup notifier callback that initializes 1581 * cpu_cache_get for all new cpus 1582 */ 1583 register_cpu_notifier(&cpucache_notifier); 1584 1585 /* 1586 * The reap timers are started later, with a module init call: That part 1587 * of the kernel is not yet operational. 1588 */ 1589} 1590 1591static int __init cpucache_init(void) 1592{ 1593 int cpu; 1594 1595 /* 1596 * Register the timers that return unneeded pages to the page allocator 1597 */ 1598 for_each_online_cpu(cpu) 1599 start_cpu_timer(cpu); 1600 return 0; 1601} 1602__initcall(cpucache_init); 1603 1604/* 1605 * Interface to system's page allocator. No need to hold the cache-lock. 1606 * 1607 * If we requested dmaable memory, we will get it. Even if we 1608 * did not request dmaable memory, we might get it, but that 1609 * would be relatively rare and ignorable. 1610 */ 1611static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) 1612{ 1613 struct page *page; 1614 int nr_pages; 1615 int i; 1616 1617#ifndef CONFIG_MMU 1618 /* 1619 * Nommu uses slab's for process anonymous memory allocations, and thus 1620 * requires __GFP_COMP to properly refcount higher order allocations 1621 */ 1622 flags |= __GFP_COMP; 1623#endif 1624 1625 flags |= cachep->gfpflags; 1626 1627 page = alloc_pages_node(nodeid, flags, cachep->gfporder); 1628 if (!page) 1629 return NULL; 1630 1631 nr_pages = (1 << cachep->gfporder); 1632 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1633 add_zone_page_state(page_zone(page), 1634 NR_SLAB_RECLAIMABLE, nr_pages); 1635 else 1636 add_zone_page_state(page_zone(page), 1637 NR_SLAB_UNRECLAIMABLE, nr_pages); 1638 for (i = 0; i < nr_pages; i++) 1639 __SetPageSlab(page + i); 1640 return page_address(page); 1641} 1642 1643/* 1644 * Interface to system's page release. 1645 */ 1646static void kmem_freepages(struct kmem_cache *cachep, void *addr) 1647{ 1648 unsigned long i = (1 << cachep->gfporder); 1649 struct page *page = virt_to_page(addr); 1650 const unsigned long nr_freed = i; 1651 1652 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1653 sub_zone_page_state(page_zone(page), 1654 NR_SLAB_RECLAIMABLE, nr_freed); 1655 else 1656 sub_zone_page_state(page_zone(page), 1657 NR_SLAB_UNRECLAIMABLE, nr_freed); 1658 while (i--) { 1659 BUG_ON(!PageSlab(page)); 1660 __ClearPageSlab(page); 1661 page++; 1662 } 1663 if (current->reclaim_state) 1664 current->reclaim_state->reclaimed_slab += nr_freed; 1665 free_pages((unsigned long)addr, cachep->gfporder); 1666} 1667 1668static void kmem_rcu_free(struct rcu_head *head) 1669{ 1670 struct slab_rcu *slab_rcu = (struct slab_rcu *)head; 1671 struct kmem_cache *cachep = slab_rcu->cachep; 1672 1673 kmem_freepages(cachep, slab_rcu->addr); 1674 if (OFF_SLAB(cachep)) 1675 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1676} 1677 1678#if DEBUG 1679 1680#ifdef CONFIG_DEBUG_PAGEALLOC 1681static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1682 unsigned long caller) 1683{ 1684 int size = obj_size(cachep); 1685 1686 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1687 1688 if (size < 5 * sizeof(unsigned long)) 1689 return; 1690 1691 *addr++ = 0x12345678; 1692 *addr++ = caller; 1693 *addr++ = smp_processor_id(); 1694 size -= 3 * sizeof(unsigned long); 1695 { 1696 unsigned long *sptr = &caller; 1697 unsigned long svalue; 1698 1699 while (!kstack_end(sptr)) { 1700 svalue = *sptr++; 1701 if (kernel_text_address(svalue)) { 1702 *addr++ = svalue; 1703 size -= sizeof(unsigned long); 1704 if (size <= sizeof(unsigned long)) 1705 break; 1706 } 1707 } 1708 1709 } 1710 *addr++ = 0x87654321; 1711} 1712#endif 1713 1714static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1715{ 1716 int size = obj_size(cachep); 1717 addr = &((char *)addr)[obj_offset(cachep)]; 1718 1719 memset(addr, val, size); 1720 *(unsigned char *)(addr + size - 1) = POISON_END; 1721} 1722 1723static void dump_line(char *data, int offset, int limit) 1724{ 1725 int i; 1726 unsigned char error = 0; 1727 int bad_count = 0; 1728 1729 printk(KERN_ERR "%03x:", offset); 1730 for (i = 0; i < limit; i++) { 1731 if (data[offset + i] != POISON_FREE) { 1732 error = data[offset + i]; 1733 bad_count++; 1734 } 1735 printk(" %02x", (unsigned char)data[offset + i]); 1736 } 1737 printk("\n"); 1738 1739 if (bad_count == 1) { 1740 error ^= POISON_FREE; 1741 if (!(error & (error - 1))) { 1742 printk(KERN_ERR "Single bit error detected. Probably " 1743 "bad RAM.\n"); 1744#ifdef CONFIG_X86 1745 printk(KERN_ERR "Run memtest86+ or a similar memory " 1746 "test tool.\n"); 1747#else 1748 printk(KERN_ERR "Run a memory test tool.\n"); 1749#endif 1750 } 1751 } 1752} 1753#endif 1754 1755#if DEBUG 1756 1757static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1758{ 1759 int i, size; 1760 char *realobj; 1761 1762 if (cachep->flags & SLAB_RED_ZONE) { 1763 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", 1764 *dbg_redzone1(cachep, objp), 1765 *dbg_redzone2(cachep, objp)); 1766 } 1767 1768 if (cachep->flags & SLAB_STORE_USER) { 1769 printk(KERN_ERR "Last user: [<%p>]", 1770 *dbg_userword(cachep, objp)); 1771 print_symbol("(%s)", 1772 (unsigned long)*dbg_userword(cachep, objp)); 1773 printk("\n"); 1774 } 1775 realobj = (char *)objp + obj_offset(cachep); 1776 size = obj_size(cachep); 1777 for (i = 0; i < size && lines; i += 16, lines--) { 1778 int limit; 1779 limit = 16; 1780 if (i + limit > size) 1781 limit = size - i; 1782 dump_line(realobj, i, limit); 1783 } 1784} 1785 1786static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1787{ 1788 char *realobj; 1789 int size, i; 1790 int lines = 0; 1791 1792 realobj = (char *)objp + obj_offset(cachep); 1793 size = obj_size(cachep); 1794 1795 for (i = 0; i < size; i++) { 1796 char exp = POISON_FREE; 1797 if (i == size - 1) 1798 exp = POISON_END; 1799 if (realobj[i] != exp) { 1800 int limit; 1801 /* Mismatch ! */ 1802 /* Print header */ 1803 if (lines == 0) { 1804 printk(KERN_ERR 1805 "Slab corruption: %s start=%p, len=%d\n", 1806 cachep->name, realobj, size); 1807 print_objinfo(cachep, objp, 0); 1808 } 1809 /* Hexdump the affected line */ 1810 i = (i / 16) * 16; 1811 limit = 16; 1812 if (i + limit > size) 1813 limit = size - i; 1814 dump_line(realobj, i, limit); 1815 i += 16; 1816 lines++; 1817 /* Limit to 5 lines */ 1818 if (lines > 5) 1819 break; 1820 } 1821 } 1822 if (lines != 0) { 1823 /* Print some data about the neighboring objects, if they 1824 * exist: 1825 */ 1826 struct slab *slabp = virt_to_slab(objp); 1827 unsigned int objnr; 1828 1829 objnr = obj_to_index(cachep, slabp, objp); 1830 if (objnr) { 1831 objp = index_to_obj(cachep, slabp, objnr - 1); 1832 realobj = (char *)objp + obj_offset(cachep); 1833 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1834 realobj, size); 1835 print_objinfo(cachep, objp, 2); 1836 } 1837 if (objnr + 1 < cachep->num) { 1838 objp = index_to_obj(cachep, slabp, objnr + 1); 1839 realobj = (char *)objp + obj_offset(cachep); 1840 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1841 realobj, size); 1842 print_objinfo(cachep, objp, 2); 1843 } 1844 } 1845} 1846#endif 1847 1848#if DEBUG 1849/** 1850 * slab_destroy_objs - destroy a slab and its objects 1851 * @cachep: cache pointer being destroyed 1852 * @slabp: slab pointer being destroyed 1853 * 1854 * Call the registered destructor for each object in a slab that is being 1855 * destroyed. 1856 */ 1857static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1858{ 1859 int i; 1860 for (i = 0; i < cachep->num; i++) { 1861 void *objp = index_to_obj(cachep, slabp, i); 1862 1863 if (cachep->flags & SLAB_POISON) { 1864#ifdef CONFIG_DEBUG_PAGEALLOC 1865 if (cachep->buffer_size % PAGE_SIZE == 0 && 1866 OFF_SLAB(cachep)) 1867 kernel_map_pages(virt_to_page(objp), 1868 cachep->buffer_size / PAGE_SIZE, 1); 1869 else 1870 check_poison_obj(cachep, objp); 1871#else 1872 check_poison_obj(cachep, objp); 1873#endif 1874 } 1875 if (cachep->flags & SLAB_RED_ZONE) { 1876 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1877 slab_error(cachep, "start of a freed object " 1878 "was overwritten"); 1879 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1880 slab_error(cachep, "end of a freed object " 1881 "was overwritten"); 1882 } 1883 if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1884 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); 1885 } 1886} 1887#else 1888static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1889{ 1890 if (cachep->dtor) { 1891 int i; 1892 for (i = 0; i < cachep->num; i++) { 1893 void *objp = index_to_obj(cachep, slabp, i); 1894 (cachep->dtor) (objp, cachep, 0); 1895 } 1896 } 1897} 1898#endif 1899 1900/** 1901 * slab_destroy - destroy and release all objects in a slab 1902 * @cachep: cache pointer being destroyed 1903 * @slabp: slab pointer being destroyed 1904 * 1905 * Destroy all the objs in a slab, and release the mem back to the system. 1906 * Before calling the slab must have been unlinked from the cache. The 1907 * cache-lock is not held/needed. 1908 */ 1909static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) 1910{ 1911 void *addr = slabp->s_mem - slabp->colouroff; 1912 1913 slab_destroy_objs(cachep, slabp); 1914 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1915 struct slab_rcu *slab_rcu; 1916 1917 slab_rcu = (struct slab_rcu *)slabp; 1918 slab_rcu->cachep = cachep; 1919 slab_rcu->addr = addr; 1920 call_rcu(&slab_rcu->head, kmem_rcu_free); 1921 } else { 1922 kmem_freepages(cachep, addr); 1923 if (OFF_SLAB(cachep)) 1924 kmem_cache_free(cachep->slabp_cache, slabp); 1925 } 1926} 1927 1928/* 1929 * For setting up all the kmem_list3s for cache whose buffer_size is same as 1930 * size of kmem_list3. 1931 */ 1932static void set_up_list3s(struct kmem_cache *cachep, int index) 1933{ 1934 int node; 1935 1936 for_each_online_node(node) { 1937 cachep->nodelists[node] = &initkmem_list3[index + node]; 1938 cachep->nodelists[node]->next_reap = jiffies + 1939 REAPTIMEOUT_LIST3 + 1940 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1941 } 1942} 1943 1944static void __kmem_cache_destroy(struct kmem_cache *cachep) 1945{ 1946 int i; 1947 struct kmem_list3 *l3; 1948 1949 for_each_online_cpu(i) 1950 kfree(cachep->array[i]); 1951 1952 /* NUMA: free the list3 structures */ 1953 for_each_online_node(i) { 1954 l3 = cachep->nodelists[i]; 1955 if (l3) { 1956 kfree(l3->shared); 1957 free_alien_cache(l3->alien); 1958 kfree(l3); 1959 } 1960 } 1961 kmem_cache_free(&cache_cache, cachep); 1962} 1963 1964 1965/** 1966 * calculate_slab_order - calculate size (page order) of slabs 1967 * @cachep: pointer to the cache that is being created 1968 * @size: size of objects to be created in this cache. 1969 * @align: required alignment for the objects. 1970 * @flags: slab allocation flags 1971 * 1972 * Also calculates the number of objects per slab. 1973 * 1974 * This could be made much more intelligent. For now, try to avoid using 1975 * high order pages for slabs. When the gfp() functions are more friendly 1976 * towards high-order requests, this should be changed. 1977 */ 1978static size_t calculate_slab_order(struct kmem_cache *cachep, 1979 size_t size, size_t align, unsigned long flags) 1980{ 1981 unsigned long offslab_limit; 1982 size_t left_over = 0; 1983 int gfporder; 1984 1985 for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { 1986 unsigned int num; 1987 size_t remainder; 1988 1989 cache_estimate(gfporder, size, align, flags, &remainder, &num); 1990 if (!num) 1991 continue; 1992 1993 if (flags & CFLGS_OFF_SLAB) { 1994 /* 1995 * Max number of objs-per-slab for caches which 1996 * use off-slab slabs. Needed to avoid a possible 1997 * looping condition in cache_grow(). 1998 */ 1999 offslab_limit = size - sizeof(struct slab); 2000 offslab_limit /= sizeof(kmem_bufctl_t); 2001 2002 if (num > offslab_limit) 2003 break; 2004 } 2005 2006 /* Found something acceptable - save it away */ 2007 cachep->num = num; 2008 cachep->gfporder = gfporder; 2009 left_over = remainder; 2010 2011 /* 2012 * A VFS-reclaimable slab tends to have most allocations 2013 * as GFP_NOFS and we really don't want to have to be allocating 2014 * higher-order pages when we are unable to shrink dcache. 2015 */ 2016 if (flags & SLAB_RECLAIM_ACCOUNT) 2017 break; 2018 2019 /* 2020 * Large number of objects is good, but very large slabs are 2021 * currently bad for the gfp()s. 2022 */ 2023 if (gfporder >= slab_break_gfp_order) 2024 break; 2025 2026 /* 2027 * Acceptable internal fragmentation? 2028 */ 2029 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 2030 break; 2031 } 2032 return left_over; 2033} 2034 2035static int setup_cpu_cache(struct kmem_cache *cachep) 2036{ 2037 if (g_cpucache_up == FULL) 2038 return enable_cpucache(cachep); 2039 2040 if (g_cpucache_up == NONE) { 2041 /* 2042 * Note: the first kmem_cache_create must create the cache 2043 * that's used by kmalloc(24), otherwise the creation of 2044 * further caches will BUG(). 2045 */ 2046 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2047 2048 /* 2049 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 2050 * the first cache, then we need to set up all its list3s, 2051 * otherwise the creation of further caches will BUG(). 2052 */ 2053 set_up_list3s(cachep, SIZE_AC); 2054 if (INDEX_AC == INDEX_L3) 2055 g_cpucache_up = PARTIAL_L3; 2056 else 2057 g_cpucache_up = PARTIAL_AC; 2058 } else { 2059 cachep->array[smp_processor_id()] = 2060 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 2061 2062 if (g_cpucache_up == PARTIAL_AC) { 2063 set_up_list3s(cachep, SIZE_L3); 2064 g_cpucache_up = PARTIAL_L3; 2065 } else { 2066 int node; 2067 for_each_online_node(node) { 2068 cachep->nodelists[node] = 2069 kmalloc_node(sizeof(struct kmem_list3), 2070 GFP_KERNEL, node); 2071 BUG_ON(!cachep->nodelists[node]); 2072 kmem_list3_init(cachep->nodelists[node]); 2073 } 2074 } 2075 } 2076 cachep->nodelists[numa_node_id()]->next_reap = 2077 jiffies + REAPTIMEOUT_LIST3 + 2078 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2079 2080 cpu_cache_get(cachep)->avail = 0; 2081 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2082 cpu_cache_get(cachep)->batchcount = 1; 2083 cpu_cache_get(cachep)->touched = 0; 2084 cachep->batchcount = 1; 2085 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2086 return 0; 2087} 2088 2089/** 2090 * kmem_cache_create - Create a cache. 2091 * @name: A string which is used in /proc/slabinfo to identify this cache. 2092 * @size: The size of objects to be created in this cache. 2093 * @align: The required alignment for the objects. 2094 * @flags: SLAB flags 2095 * @ctor: A constructor for the objects. 2096 * @dtor: A destructor for the objects. 2097 * 2098 * Returns a ptr to the cache on success, NULL on failure. 2099 * Cannot be called within a int, but can be interrupted. 2100 * The @ctor is run when new pages are allocated by the cache 2101 * and the @dtor is run before the pages are handed back. 2102 * 2103 * @name must be valid until the cache is destroyed. This implies that 2104 * the module calling this has to destroy the cache before getting unloaded. 2105 * 2106 * The flags are 2107 * 2108 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2109 * to catch references to uninitialised memory. 2110 * 2111 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2112 * for buffer overruns. 2113 * 2114 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2115 * cacheline. This can be beneficial if you're counting cycles as closely 2116 * as davem. 2117 */ 2118struct kmem_cache * 2119kmem_cache_create (const char *name, size_t size, size_t align, 2120 unsigned long flags, 2121 void (*ctor)(void*, struct kmem_cache *, unsigned long), 2122 void (*dtor)(void*, struct kmem_cache *, unsigned long)) 2123{ 2124 size_t left_over, slab_size, ralign; 2125 struct kmem_cache *cachep = NULL, *pc; 2126 2127 /* 2128 * Sanity checks... these are all serious usage bugs. 2129 */ 2130 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2131 (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { 2132 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2133 name); 2134 BUG(); 2135 } 2136 2137 /* 2138 * We use cache_chain_mutex to ensure a consistent view of 2139 * cpu_online_map as well. Please see cpuup_callback 2140 */ 2141 mutex_lock(&cache_chain_mutex); 2142 2143 list_for_each_entry(pc, &cache_chain, next) { 2144 char tmp; 2145 int res; 2146 2147 /* 2148 * This happens when the module gets unloaded and doesn't 2149 * destroy its slab cache and no-one else reuses the vmalloc 2150 * area of the module. Print a warning. 2151 */ 2152 res = probe_kernel_address(pc->name, tmp); 2153 if (res) { 2154 printk("SLAB: cache with size %d has lost its name\n", 2155 pc->buffer_size); 2156 continue; 2157 } 2158 2159 if (!strcmp(pc->name, name)) { 2160 printk("kmem_cache_create: duplicate cache %s\n", name); 2161 dump_stack(); 2162 goto oops; 2163 } 2164 } 2165 2166#if DEBUG 2167 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 2168 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { 2169 /* No constructor, but inital state check requested */ 2170 printk(KERN_ERR "%s: No con, but init state check " 2171 "requested - %s\n", __FUNCTION__, name); 2172 flags &= ~SLAB_DEBUG_INITIAL; 2173 } 2174#if FORCED_DEBUG 2175 /* 2176 * Enable redzoning and last user accounting, except for caches with 2177 * large objects, if the increased size would increase the object size 2178 * above the next power of two: caches with object sizes just above a 2179 * power of two have a significant amount of internal fragmentation. 2180 */ 2181 if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD)) 2182 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2183 if (!(flags & SLAB_DESTROY_BY_RCU)) 2184 flags |= SLAB_POISON; 2185#endif 2186 if (flags & SLAB_DESTROY_BY_RCU) 2187 BUG_ON(flags & SLAB_POISON); 2188#endif 2189 if (flags & SLAB_DESTROY_BY_RCU) 2190 BUG_ON(dtor); 2191 2192 /* 2193 * Always checks flags, a caller might be expecting debug support which 2194 * isn't available. 2195 */ 2196 BUG_ON(flags & ~CREATE_MASK); 2197 2198 /* 2199 * Check that size is in terms of words. This is needed to avoid 2200 * unaligned accesses for some archs when redzoning is used, and makes 2201 * sure any on-slab bufctl's are also correctly aligned. 2202 */ 2203 if (size & (BYTES_PER_WORD - 1)) { 2204 size += (BYTES_PER_WORD - 1); 2205 size &= ~(BYTES_PER_WORD - 1); 2206 } 2207 2208 /* calculate the final buffer alignment: */ 2209 2210 /* 1) arch recommendation: can be overridden for debug */ 2211 if (flags & SLAB_HWCACHE_ALIGN) { 2212 /* 2213 * Default alignment: as specified by the arch code. Except if 2214 * an object is really small, then squeeze multiple objects into 2215 * one cacheline. 2216 */ 2217 ralign = cache_line_size(); 2218 while (size <= ralign / 2) 2219 ralign /= 2; 2220 } else { 2221 ralign = BYTES_PER_WORD; 2222 } 2223 2224 /* 2225 * Redzoning and user store require word alignment. Note this will be 2226 * overridden by architecture or caller mandated alignment if either 2227 * is greater than BYTES_PER_WORD. 2228 */ 2229 if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) 2230 ralign = BYTES_PER_WORD; 2231 2232 /* 2) arch mandated alignment */ 2233 if (ralign < ARCH_SLAB_MINALIGN) { 2234 ralign = ARCH_SLAB_MINALIGN; 2235 } 2236 /* 3) caller mandated alignment */ 2237 if (ralign < align) { 2238 ralign = align; 2239 } 2240 /* disable debug if necessary */ 2241 if (ralign > BYTES_PER_WORD) 2242 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2243 /* 2244 * 4) Store it. 2245 */ 2246 align = ralign; 2247 2248 /* Get cache's description obj. */ 2249 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); 2250 if (!cachep) 2251 goto oops; 2252 2253#if DEBUG 2254 cachep->obj_size = size; 2255 2256 /* 2257 * Both debugging options require word-alignment which is calculated 2258 * into align above. 2259 */ 2260 if (flags & SLAB_RED_ZONE) { 2261 /* add space for red zone words */ 2262 cachep->obj_offset += BYTES_PER_WORD; 2263 size += 2 * BYTES_PER_WORD; 2264 } 2265 if (flags & SLAB_STORE_USER) { 2266 /* user store requires one word storage behind the end of 2267 * the real object. 2268 */ 2269 size += BYTES_PER_WORD; 2270 } 2271#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2272 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2273 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2274 cachep->obj_offset += PAGE_SIZE - size; 2275 size = PAGE_SIZE; 2276 } 2277#endif 2278#endif 2279 2280 /* 2281 * Determine if the slab management is 'on' or 'off' slab. 2282 * (bootstrapping cannot cope with offslab caches so don't do 2283 * it too early on.) 2284 */ 2285 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) 2286 /* 2287 * Size is large, assume best to place the slab management obj 2288 * off-slab (should allow better packing of objs). 2289 */ 2290 flags |= CFLGS_OFF_SLAB; 2291 2292 size = ALIGN(size, align); 2293 2294 left_over = calculate_slab_order(cachep, size, align, flags); 2295 2296 if (!cachep->num) { 2297 printk("kmem_cache_create: couldn't create cache %s.\n", name); 2298 kmem_cache_free(&cache_cache, cachep); 2299 cachep = NULL; 2300 goto oops; 2301 } 2302 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2303 + sizeof(struct slab), align); 2304 2305 /* 2306 * If the slab has been placed off-slab, and we have enough space then 2307 * move it on-slab. This is at the expense of any extra colouring. 2308 */ 2309 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2310 flags &= ~CFLGS_OFF_SLAB; 2311 left_over -= slab_size; 2312 } 2313 2314 if (flags & CFLGS_OFF_SLAB) { 2315 /* really off slab. No need for manual alignment */ 2316 slab_size = 2317 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2318 } 2319 2320 cachep->colour_off = cache_line_size(); 2321 /* Offset must be a multiple of the alignment. */ 2322 if (cachep->colour_off < align) 2323 cachep->colour_off = align; 2324 cachep->colour = left_over / cachep->colour_off; 2325 cachep->slab_size = slab_size; 2326 cachep->flags = flags; 2327 cachep->gfpflags = 0; 2328 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2329 cachep->gfpflags |= GFP_DMA; 2330 cachep->buffer_size = size; 2331 cachep->reciprocal_buffer_size = reciprocal_value(size); 2332 2333 if (flags & CFLGS_OFF_SLAB) { 2334 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2335 /* 2336 * This is a possibility for one of the malloc_sizes caches. 2337 * But since we go off slab only for object size greater than 2338 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2339 * this should not happen at all. 2340 * But leave a BUG_ON for some lucky dude. 2341 */ 2342 BUG_ON(!cachep->slabp_cache); 2343 } 2344 cachep->ctor = ctor; 2345 cachep->dtor = dtor; 2346 cachep->name = name; 2347 2348 if (setup_cpu_cache(cachep)) { 2349 __kmem_cache_destroy(cachep); 2350 cachep = NULL; 2351 goto oops; 2352 } 2353 2354 /* cache setup completed, link it into the list */ 2355 list_add(&cachep->next, &cache_chain); 2356oops: 2357 if (!cachep && (flags & SLAB_PANIC)) 2358 panic("kmem_cache_create(): failed to create slab `%s'\n", 2359 name); 2360 mutex_unlock(&cache_chain_mutex); 2361 return cachep; 2362} 2363EXPORT_SYMBOL(kmem_cache_create); 2364 2365#if DEBUG 2366static void check_irq_off(void) 2367{ 2368 BUG_ON(!irqs_disabled()); 2369} 2370 2371static void check_irq_on(void) 2372{ 2373 BUG_ON(irqs_disabled()); 2374} 2375 2376static void check_spinlock_acquired(struct kmem_cache *cachep) 2377{ 2378#ifdef CONFIG_SMP 2379 check_irq_off(); 2380 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2381#endif 2382} 2383 2384static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2385{ 2386#ifdef CONFIG_SMP 2387 check_irq_off(); 2388 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2389#endif 2390} 2391 2392#else 2393#define check_irq_off() do { } while(0) 2394#define check_irq_on() do { } while(0) 2395#define check_spinlock_acquired(x) do { } while(0) 2396#define check_spinlock_acquired_node(x, y) do { } while(0) 2397#endif 2398 2399static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2400 struct array_cache *ac, 2401 int force, int node); 2402 2403static void do_drain(void *arg) 2404{ 2405 struct kmem_cache *cachep = arg; 2406 struct array_cache *ac; 2407 int node = numa_node_id(); 2408 2409 check_irq_off(); 2410 ac = cpu_cache_get(cachep); 2411 spin_lock(&cachep->nodelists[node]->list_lock); 2412 free_block(cachep, ac->entry, ac->avail, node); 2413 spin_unlock(&cachep->nodelists[node]->list_lock); 2414 ac->avail = 0; 2415} 2416 2417static void drain_cpu_caches(struct kmem_cache *cachep) 2418{ 2419 struct kmem_list3 *l3; 2420 int node; 2421 2422 on_each_cpu(do_drain, cachep, 1, 1); 2423 check_irq_on(); 2424 for_each_online_node(node) { 2425 l3 = cachep->nodelists[node]; 2426 if (l3 && l3->alien) 2427 drain_alien_cache(cachep, l3->alien); 2428 } 2429 2430 for_each_online_node(node) { 2431 l3 = cachep->nodelists[node]; 2432 if (l3) 2433 drain_array(cachep, l3, l3->shared, 1, node); 2434 } 2435} 2436 2437/* 2438 * Remove slabs from the list of free slabs. 2439 * Specify the number of slabs to drain in tofree. 2440 * 2441 * Returns the actual number of slabs released. 2442 */ 2443static int drain_freelist(struct kmem_cache *cache, 2444 struct kmem_list3 *l3, int tofree) 2445{ 2446 struct list_head *p; 2447 int nr_freed; 2448 struct slab *slabp; 2449 2450 nr_freed = 0; 2451 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { 2452 2453 spin_lock_irq(&l3->list_lock); 2454 p = l3->slabs_free.prev; 2455 if (p == &l3->slabs_free) { 2456 spin_unlock_irq(&l3->list_lock); 2457 goto out; 2458 } 2459 2460 slabp = list_entry(p, struct slab, list); 2461#if DEBUG 2462 BUG_ON(slabp->inuse); 2463#endif 2464 list_del(&slabp->list); 2465 /* 2466 * Safe to drop the lock. The slab is no longer linked 2467 * to the cache. 2468 */ 2469 l3->free_objects -= cache->num; 2470 spin_unlock_irq(&l3->list_lock); 2471 slab_destroy(cache, slabp); 2472 nr_freed++; 2473 } 2474out: 2475 return nr_freed; 2476} 2477 2478/* Called with cache_chain_mutex held to protect against cpu hotplug */ 2479static int __cache_shrink(struct kmem_cache *cachep) 2480{ 2481 int ret = 0, i = 0; 2482 struct kmem_list3 *l3; 2483 2484 drain_cpu_caches(cachep); 2485 2486 check_irq_on(); 2487 for_each_online_node(i) { 2488 l3 = cachep->nodelists[i]; 2489 if (!l3) 2490 continue; 2491 2492 drain_freelist(cachep, l3, l3->free_objects); 2493 2494 ret += !list_empty(&l3->slabs_full) || 2495 !list_empty(&l3->slabs_partial); 2496 } 2497 return (ret ? 1 : 0); 2498} 2499 2500/** 2501 * kmem_cache_shrink - Shrink a cache. 2502 * @cachep: The cache to shrink. 2503 * 2504 * Releases as many slabs as possible for a cache. 2505 * To help debugging, a zero exit status indicates all slabs were released. 2506 */ 2507int kmem_cache_shrink(struct kmem_cache *cachep) 2508{ 2509 int ret; 2510 BUG_ON(!cachep || in_interrupt()); 2511 2512 mutex_lock(&cache_chain_mutex); 2513 ret = __cache_shrink(cachep); 2514 mutex_unlock(&cache_chain_mutex); 2515 return ret; 2516} 2517EXPORT_SYMBOL(kmem_cache_shrink); 2518 2519/** 2520 * kmem_cache_destroy - delete a cache 2521 * @cachep: the cache to destroy 2522 * 2523 * Remove a &struct kmem_cache object from the slab cache. 2524 * 2525 * It is expected this function will be called by a module when it is 2526 * unloaded. This will remove the cache completely, and avoid a duplicate 2527 * cache being allocated each time a module is loaded and unloaded, if the 2528 * module doesn't have persistent in-kernel storage across loads and unloads. 2529 * 2530 * The cache must be empty before calling this function. 2531 * 2532 * The caller must guarantee that noone will allocate memory from the cache 2533 * during the kmem_cache_destroy(). 2534 */ 2535void kmem_cache_destroy(struct kmem_cache *cachep) 2536{ 2537 BUG_ON(!cachep || in_interrupt()); 2538 2539 /* Find the cache in the chain of caches. */ 2540 mutex_lock(&cache_chain_mutex); 2541 /* 2542 * the chain is never empty, cache_cache is never destroyed 2543 */ 2544 list_del(&cachep->next); 2545 if (__cache_shrink(cachep)) { 2546 slab_error(cachep, "Can't free all objects"); 2547 list_add(&cachep->next, &cache_chain); 2548 mutex_unlock(&cache_chain_mutex); 2549 return; 2550 } 2551 2552 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2553 synchronize_rcu(); 2554 2555 __kmem_cache_destroy(cachep); 2556 mutex_unlock(&cache_chain_mutex); 2557} 2558EXPORT_SYMBOL(kmem_cache_destroy); 2559 2560/* 2561 * Get the memory for a slab management obj. 2562 * For a slab cache when the slab descriptor is off-slab, slab descriptors 2563 * always come from malloc_sizes caches. The slab descriptor cannot 2564 * come from the same cache which is getting created because, 2565 * when we are searching for an appropriate cache for these 2566 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2567 * If we are creating a malloc_sizes cache here it would not be visible to 2568 * kmem_find_general_cachep till the initialization is complete. 2569 * Hence we cannot have slabp_cache same as the original cache. 2570 */ 2571static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2572 int colour_off, gfp_t local_flags, 2573 int nodeid) 2574{ 2575 struct slab *slabp; 2576 2577 if (OFF_SLAB(cachep)) { 2578 /* Slab management obj is off-slab. */ 2579 slabp = kmem_cache_alloc_node(cachep->slabp_cache, 2580 local_flags & ~GFP_THISNODE, nodeid); 2581 if (!slabp) 2582 return NULL; 2583 } else { 2584 slabp = objp + colour_off; 2585 colour_off += cachep->slab_size; 2586 } 2587 slabp->inuse = 0; 2588 slabp->colouroff = colour_off; 2589 slabp->s_mem = objp + colour_off; 2590 slabp->nodeid = nodeid; 2591 return slabp; 2592} 2593 2594static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2595{ 2596 return (kmem_bufctl_t *) (slabp + 1); 2597} 2598 2599static void cache_init_objs(struct kmem_cache *cachep, 2600 struct slab *slabp, unsigned long ctor_flags) 2601{ 2602 int i; 2603 2604 for (i = 0; i < cachep->num; i++) { 2605 void *objp = index_to_obj(cachep, slabp, i); 2606#if DEBUG 2607 /* need to poison the objs? */ 2608 if (cachep->flags & SLAB_POISON) 2609 poison_obj(cachep, objp, POISON_FREE); 2610 if (cachep->flags & SLAB_STORE_USER) 2611 *dbg_userword(cachep, objp) = NULL; 2612 2613 if (cachep->flags & SLAB_RED_ZONE) { 2614 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2615 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2616 } 2617 /* 2618 * Constructors are not allowed to allocate memory from the same 2619 * cache which they are a constructor for. Otherwise, deadlock. 2620 * They must also be threaded. 2621 */ 2622 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2623 cachep->ctor(objp + obj_offset(cachep), cachep, 2624 ctor_flags); 2625 2626 if (cachep->flags & SLAB_RED_ZONE) { 2627 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2628 slab_error(cachep, "constructor overwrote the" 2629 " end of an object"); 2630 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2631 slab_error(cachep, "constructor overwrote the" 2632 " start of an object"); 2633 } 2634 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2635 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2636 kernel_map_pages(virt_to_page(objp), 2637 cachep->buffer_size / PAGE_SIZE, 0); 2638#else 2639 if (cachep->ctor) 2640 cachep->ctor(objp, cachep, ctor_flags); 2641#endif 2642 slab_bufctl(slabp)[i] = i + 1; 2643 } 2644 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2645 slabp->free = 0; 2646} 2647 2648static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2649{ 2650 if (CONFIG_ZONE_DMA_FLAG) { 2651 if (flags & GFP_DMA) 2652 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2653 else 2654 BUG_ON(cachep->gfpflags & GFP_DMA); 2655 } 2656} 2657 2658static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2659 int nodeid) 2660{ 2661 void *objp = index_to_obj(cachep, slabp, slabp->free); 2662 kmem_bufctl_t next; 2663 2664 slabp->inuse++; 2665 next = slab_bufctl(slabp)[slabp->free]; 2666#if DEBUG 2667 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2668 WARN_ON(slabp->nodeid != nodeid); 2669#endif 2670 slabp->free = next; 2671 2672 return objp; 2673} 2674 2675static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2676 void *objp, int nodeid) 2677{ 2678 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2679 2680#if DEBUG 2681 /* Verify that the slab belongs to the intended node */ 2682 WARN_ON(slabp->nodeid != nodeid); 2683 2684 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2685 printk(KERN_ERR "slab: double free detected in cache " 2686 "'%s', objp %p\n", cachep->name, objp); 2687 BUG(); 2688 } 2689#endif 2690 slab_bufctl(slabp)[objnr] = slabp->free; 2691 slabp->free = objnr; 2692 slabp->inuse--; 2693} 2694 2695/* 2696 * Map pages beginning at addr to the given cache and slab. This is required 2697 * for the slab allocator to be able to lookup the cache and slab of a 2698 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2699 */ 2700static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2701 void *addr) 2702{ 2703 int nr_pages; 2704 struct page *page; 2705 2706 page = virt_to_page(addr); 2707 2708 nr_pages = 1; 2709 if (likely(!PageCompound(page))) 2710 nr_pages <<= cache->gfporder; 2711 2712 do { 2713 page_set_cache(page, cache); 2714 page_set_slab(page, slab); 2715 page++; 2716 } while (--nr_pages); 2717} 2718 2719/* 2720 * Grow (by 1) the number of slabs within a cache. This is called by 2721 * kmem_cache_alloc() when there are no active objs left in a cache. 2722 */ 2723static int cache_grow(struct kmem_cache *cachep, 2724 gfp_t flags, int nodeid, void *objp) 2725{ 2726 struct slab *slabp; 2727 size_t offset; 2728 gfp_t local_flags; 2729 unsigned long ctor_flags; 2730 struct kmem_list3 *l3; 2731 2732 /* 2733 * Be lazy and only check for valid flags here, keeping it out of the 2734 * critical path in kmem_cache_alloc(). 2735 */ 2736 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); 2737 if (flags & __GFP_NO_GROW) 2738 return 0; 2739 2740 ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2741 local_flags = (flags & GFP_LEVEL_MASK); 2742 if (!(local_flags & __GFP_WAIT)) 2743 /* 2744 * Not allowed to sleep. Need to tell a constructor about 2745 * this - it might need to know... 2746 */ 2747 ctor_flags |= SLAB_CTOR_ATOMIC; 2748 2749 /* Take the l3 list lock to change the colour_next on this node */ 2750 check_irq_off(); 2751 l3 = cachep->nodelists[nodeid]; 2752 spin_lock(&l3->list_lock); 2753 2754 /* Get colour for the slab, and cal the next value. */ 2755 offset = l3->colour_next; 2756 l3->colour_next++; 2757 if (l3->colour_next >= cachep->colour) 2758 l3->colour_next = 0; 2759 spin_unlock(&l3->list_lock); 2760 2761 offset *= cachep->colour_off; 2762 2763 if (local_flags & __GFP_WAIT) 2764 local_irq_enable(); 2765 2766 /* 2767 * The test for missing atomic flag is performed here, rather than 2768 * the more obvious place, simply to reduce the critical path length 2769 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2770 * will eventually be caught here (where it matters). 2771 */ 2772 kmem_flagcheck(cachep, flags); 2773 2774 /* 2775 * Get mem for the objs. Attempt to allocate a physical page from 2776 * 'nodeid'. 2777 */ 2778 if (!objp) 2779 objp = kmem_getpages(cachep, flags, nodeid); 2780 if (!objp) 2781 goto failed; 2782 2783 /* Get slab management. */ 2784 slabp = alloc_slabmgmt(cachep, objp, offset, 2785 local_flags & ~GFP_THISNODE, nodeid); 2786 if (!slabp) 2787 goto opps1; 2788 2789 slabp->nodeid = nodeid; 2790 slab_map_pages(cachep, slabp, objp); 2791 2792 cache_init_objs(cachep, slabp, ctor_flags); 2793 2794 if (local_flags & __GFP_WAIT) 2795 local_irq_disable(); 2796 check_irq_off(); 2797 spin_lock(&l3->list_lock); 2798 2799 /* Make slab active. */ 2800 list_add_tail(&slabp->list, &(l3->slabs_free)); 2801 STATS_INC_GROWN(cachep); 2802 l3->free_objects += cachep->num; 2803 spin_unlock(&l3->list_lock); 2804 return 1; 2805opps1: 2806 kmem_freepages(cachep, objp); 2807failed: 2808 if (local_flags & __GFP_WAIT) 2809 local_irq_disable(); 2810 return 0; 2811} 2812 2813#if DEBUG 2814 2815/* 2816 * Perform extra freeing checks: 2817 * - detect bad pointers. 2818 * - POISON/RED_ZONE checking 2819 * - destructor calls, for caches with POISON+dtor 2820 */ 2821static void kfree_debugcheck(const void *objp) 2822{ 2823 if (!virt_addr_valid(objp)) { 2824 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2825 (unsigned long)objp); 2826 BUG(); 2827 } 2828} 2829 2830static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2831{ 2832 unsigned long redzone1, redzone2; 2833 2834 redzone1 = *dbg_redzone1(cache, obj); 2835 redzone2 = *dbg_redzone2(cache, obj); 2836 2837 /* 2838 * Redzone is ok. 2839 */ 2840 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2841 return; 2842 2843 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2844 slab_error(cache, "double free detected"); 2845 else 2846 slab_error(cache, "memory outside object was overwritten"); 2847 2848 printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n", 2849 obj, redzone1, redzone2); 2850} 2851 2852static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2853 void *caller) 2854{ 2855 struct page *page; 2856 unsigned int objnr; 2857 struct slab *slabp; 2858 2859 objp -= obj_offset(cachep); 2860 kfree_debugcheck(objp); 2861 page = virt_to_page(objp); 2862 2863 slabp = page_get_slab(page); 2864 2865 if (cachep->flags & SLAB_RED_ZONE) { 2866 verify_redzone_free(cachep, objp); 2867 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2868 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2869 } 2870 if (cachep->flags & SLAB_STORE_USER) 2871 *dbg_userword(cachep, objp) = caller; 2872 2873 objnr = obj_to_index(cachep, slabp, objp); 2874 2875 BUG_ON(objnr >= cachep->num); 2876 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2877 2878 if (cachep->flags & SLAB_DEBUG_INITIAL) { 2879 /* 2880 * Need to call the slab's constructor so the caller can 2881 * perform a verify of its state (debugging). Called without 2882 * the cache-lock held. 2883 */ 2884 cachep->ctor(objp + obj_offset(cachep), 2885 cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); 2886 } 2887 if (cachep->flags & SLAB_POISON && cachep->dtor) { 2888 /* we want to cache poison the object, 2889 * call the destruction callback 2890 */ 2891 cachep->dtor(objp + obj_offset(cachep), cachep, 0); 2892 } 2893#ifdef CONFIG_DEBUG_SLAB_LEAK 2894 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2895#endif 2896 if (cachep->flags & SLAB_POISON) { 2897#ifdef CONFIG_DEBUG_PAGEALLOC 2898 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2899 store_stackinfo(cachep, objp, (unsigned long)caller); 2900 kernel_map_pages(virt_to_page(objp), 2901 cachep->buffer_size / PAGE_SIZE, 0); 2902 } else { 2903 poison_obj(cachep, objp, POISON_FREE); 2904 } 2905#else 2906 poison_obj(cachep, objp, POISON_FREE); 2907#endif 2908 } 2909 return objp; 2910} 2911 2912static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2913{ 2914 kmem_bufctl_t i; 2915 int entries = 0; 2916 2917 /* Check slab's freelist to see if this obj is there. */ 2918 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2919 entries++; 2920 if (entries > cachep->num || i >= cachep->num) 2921 goto bad; 2922 } 2923 if (entries != cachep->num - slabp->inuse) { 2924bad: 2925 printk(KERN_ERR "slab: Internal list corruption detected in " 2926 "cache '%s'(%d), slabp %p(%d). Hexdump:\n", 2927 cachep->name, cachep->num, slabp, slabp->inuse); 2928 for (i = 0; 2929 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2930 i++) { 2931 if (i % 16 == 0) 2932 printk("\n%03x:", i); 2933 printk(" %02x", ((unsigned char *)slabp)[i]); 2934 } 2935 printk("\n"); 2936 BUG(); 2937 } 2938} 2939#else 2940#define kfree_debugcheck(x) do { } while(0) 2941#define cache_free_debugcheck(x,objp,z) (objp) 2942#define check_slabp(x,y) do { } while(0) 2943#endif 2944 2945static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2946{ 2947 int batchcount; 2948 struct kmem_list3 *l3; 2949 struct array_cache *ac; 2950 int node; 2951 2952 node = numa_node_id(); 2953 2954 check_irq_off(); 2955 ac = cpu_cache_get(cachep); 2956retry: 2957 batchcount = ac->batchcount; 2958 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2959 /* 2960 * If there was little recent activity on this cache, then 2961 * perform only a partial refill. Otherwise we could generate 2962 * refill bouncing. 2963 */ 2964 batchcount = BATCHREFILL_LIMIT; 2965 } 2966 l3 = cachep->nodelists[node]; 2967 2968 BUG_ON(ac->avail > 0 || !l3); 2969 spin_lock(&l3->list_lock); 2970 2971 /* See if we can refill from the shared array */ 2972 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) 2973 goto alloc_done; 2974 2975 while (batchcount > 0) { 2976 struct list_head *entry; 2977 struct slab *slabp; 2978 /* Get slab alloc is to come from. */ 2979 entry = l3->slabs_partial.next; 2980 if (entry == &l3->slabs_partial) { 2981 l3->free_touched = 1; 2982 entry = l3->slabs_free.next; 2983 if (entry == &l3->slabs_free) 2984 goto must_grow; 2985 } 2986 2987 slabp = list_entry(entry, struct slab, list); 2988 check_slabp(cachep, slabp); 2989 check_spinlock_acquired(cachep); 2990 while (slabp->inuse < cachep->num && batchcount--) { 2991 STATS_INC_ALLOCED(cachep); 2992 STATS_INC_ACTIVE(cachep); 2993 STATS_SET_HIGH(cachep); 2994 2995 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, 2996 node); 2997 } 2998 check_slabp(cachep, slabp); 2999 3000 /* move slabp to correct slabp list: */ 3001 list_del(&slabp->list); 3002 if (slabp->free == BUFCTL_END) 3003 list_add(&slabp->list, &l3->slabs_full); 3004 else 3005 list_add(&slabp->list, &l3->slabs_partial); 3006 } 3007 3008must_grow: 3009 l3->free_objects -= ac->avail; 3010alloc_done: 3011 spin_unlock(&l3->list_lock); 3012 3013 if (unlikely(!ac->avail)) { 3014 int x; 3015 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); 3016 3017 /* cache_grow can reenable interrupts, then ac could change. */ 3018 ac = cpu_cache_get(cachep); 3019 if (!x && ac->avail == 0) /* no objects in sight? abort */ 3020 return NULL; 3021 3022 if (!ac->avail) /* objects refilled by interrupt? */ 3023 goto retry; 3024 } 3025 ac->touched = 1; 3026 return ac->entry[--ac->avail]; 3027} 3028 3029static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3030 gfp_t flags) 3031{ 3032 might_sleep_if(flags & __GFP_WAIT); 3033#if DEBUG 3034 kmem_flagcheck(cachep, flags); 3035#endif 3036} 3037 3038#if DEBUG 3039static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3040 gfp_t flags, void *objp, void *caller) 3041{ 3042 if (!objp) 3043 return objp; 3044 if (cachep->flags & SLAB_POISON) { 3045#ifdef CONFIG_DEBUG_PAGEALLOC 3046 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 3047 kernel_map_pages(virt_to_page(objp), 3048 cachep->buffer_size / PAGE_SIZE, 1); 3049 else 3050 check_poison_obj(cachep, objp); 3051#else 3052 check_poison_obj(cachep, objp); 3053#endif 3054 poison_obj(cachep, objp, POISON_INUSE); 3055 } 3056 if (cachep->flags & SLAB_STORE_USER) 3057 *dbg_userword(cachep, objp) = caller; 3058 3059 if (cachep->flags & SLAB_RED_ZONE) { 3060 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3061 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3062 slab_error(cachep, "double free, or memory outside" 3063 " object was overwritten"); 3064 printk(KERN_ERR 3065 "%p: redzone 1:0x%lx, redzone 2:0x%lx\n", 3066 objp, *dbg_redzone1(cachep, objp), 3067 *dbg_redzone2(cachep, objp)); 3068 } 3069 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3070 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3071 } 3072#ifdef CONFIG_DEBUG_SLAB_LEAK 3073 { 3074 struct slab *slabp; 3075 unsigned objnr; 3076 3077 slabp = page_get_slab(virt_to_page(objp)); 3078 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 3079 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 3080 } 3081#endif 3082 objp += obj_offset(cachep); 3083 if (cachep->ctor && cachep->flags & SLAB_POISON) { 3084 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; 3085 3086 if (!(flags & __GFP_WAIT)) 3087 ctor_flags |= SLAB_CTOR_ATOMIC; 3088 3089 cachep->ctor(objp, cachep, ctor_flags); 3090 } 3091#if ARCH_SLAB_MINALIGN 3092 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3093 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3094 objp, ARCH_SLAB_MINALIGN); 3095 } 3096#endif 3097 return objp; 3098} 3099#else 3100#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3101#endif 3102 3103#ifdef CONFIG_FAILSLAB 3104 3105static struct failslab_attr { 3106 3107 struct fault_attr attr; 3108 3109 u32 ignore_gfp_wait; 3110#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3111 struct dentry *ignore_gfp_wait_file; 3112#endif 3113 3114} failslab = { 3115 .attr = FAULT_ATTR_INITIALIZER, 3116 .ignore_gfp_wait = 1, 3117}; 3118 3119static int __init setup_failslab(char *str) 3120{ 3121 return setup_fault_attr(&failslab.attr, str); 3122} 3123__setup("failslab=", setup_failslab); 3124 3125static int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3126{ 3127 if (cachep == &cache_cache) 3128 return 0; 3129 if (flags & __GFP_NOFAIL) 3130 return 0; 3131 if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT)) 3132 return 0; 3133 3134 return should_fail(&failslab.attr, obj_size(cachep)); 3135} 3136 3137#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3138 3139static int __init failslab_debugfs(void) 3140{ 3141 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 3142 struct dentry *dir; 3143 int err; 3144 3145 err = init_fault_attr_dentries(&failslab.attr, "failslab"); 3146 if (err) 3147 return err; 3148 dir = failslab.attr.dentries.dir; 3149 3150 failslab.ignore_gfp_wait_file = 3151 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3152 &failslab.ignore_gfp_wait); 3153 3154 if (!failslab.ignore_gfp_wait_file) { 3155 err = -ENOMEM; 3156 debugfs_remove(failslab.ignore_gfp_wait_file); 3157 cleanup_fault_attr_dentries(&failslab.attr); 3158 } 3159 3160 return err; 3161} 3162 3163late_initcall(failslab_debugfs); 3164 3165#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3166 3167#else /* CONFIG_FAILSLAB */ 3168 3169static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3170{ 3171 return 0; 3172} 3173 3174#endif /* CONFIG_FAILSLAB */ 3175 3176static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3177{ 3178 void *objp; 3179 struct array_cache *ac; 3180 3181 check_irq_off(); 3182 3183 if (should_failslab(cachep, flags)) 3184 return NULL; 3185 3186 ac = cpu_cache_get(cachep); 3187 if (likely(ac->avail)) { 3188 STATS_INC_ALLOCHIT(cachep); 3189 ac->touched = 1; 3190 objp = ac->entry[--ac->avail]; 3191 } else { 3192 STATS_INC_ALLOCMISS(cachep); 3193 objp = cache_alloc_refill(cachep, flags); 3194 } 3195 return objp; 3196} 3197 3198#ifdef CONFIG_NUMA 3199/* 3200 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 3201 * 3202 * If we are in_interrupt, then process context, including cpusets and 3203 * mempolicy, may not apply and should not be used for allocation policy. 3204 */ 3205static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3206{ 3207 int nid_alloc, nid_here; 3208 3209 if (in_interrupt() || (flags & __GFP_THISNODE)) 3210 return NULL; 3211 nid_alloc = nid_here = numa_node_id(); 3212 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3213 nid_alloc = cpuset_mem_spread_node(); 3214 else if (current->mempolicy) 3215 nid_alloc = slab_node(current->mempolicy); 3216 if (nid_alloc != nid_here) 3217 return ____cache_alloc_node(cachep, flags, nid_alloc); 3218 return NULL; 3219} 3220 3221/* 3222 * Fallback function if there was no memory available and no objects on a 3223 * certain node and fall back is permitted. First we scan all the 3224 * available nodelists for available objects. If that fails then we 3225 * perform an allocation without specifying a node. This allows the page 3226 * allocator to do its reclaim / fallback magic. We then insert the 3227 * slab into the proper nodelist and then allocate from it. 3228 */ 3229static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3230{ 3231 struct zonelist *zonelist; 3232 gfp_t local_flags; 3233 struct zone **z; 3234 void *obj = NULL; 3235 int nid; 3236 3237 if (flags & __GFP_THISNODE) 3238 return NULL; 3239 3240 zonelist = &NODE_DATA(slab_node(current->mempolicy)) 3241 ->node_zonelists[gfp_zone(flags)]; 3242 local_flags = (flags & GFP_LEVEL_MASK); 3243 3244retry: 3245 /* 3246 * Look through allowed nodes for objects available 3247 * from existing per node queues. 3248 */ 3249 for (z = zonelist->zones; *z && !obj; z++) { 3250 nid = zone_to_nid(*z); 3251 3252 if (cpuset_zone_allowed_hardwall(*z, flags) && 3253 cache->nodelists[nid] && 3254 cache->nodelists[nid]->free_objects) 3255 obj = ____cache_alloc_node(cache, 3256 flags | GFP_THISNODE, nid); 3257 } 3258 3259 if (!obj && !(flags & __GFP_NO_GROW)) { 3260 /* 3261 * This allocation will be performed within the constraints 3262 * of the current cpuset / memory policy requirements. 3263 * We may trigger various forms of reclaim on the allowed 3264 * set and go into memory reserves if necessary. 3265 */ 3266 if (local_flags & __GFP_WAIT) 3267 local_irq_enable(); 3268 kmem_flagcheck(cache, flags); 3269 obj = kmem_getpages(cache, flags, -1); 3270 if (local_flags & __GFP_WAIT) 3271 local_irq_disable(); 3272 if (obj) { 3273 /* 3274 * Insert into the appropriate per node queues 3275 */ 3276 nid = page_to_nid(virt_to_page(obj)); 3277 if (cache_grow(cache, flags, nid, obj)) { 3278 obj = ____cache_alloc_node(cache, 3279 flags | GFP_THISNODE, nid); 3280 if (!obj) 3281 /* 3282 * Another processor may allocate the 3283 * objects in the slab since we are 3284 * not holding any locks. 3285 */ 3286 goto retry; 3287 } else { 3288 /* cache_grow already freed obj */ 3289 obj = NULL; 3290 } 3291 } 3292 } 3293 return obj; 3294} 3295 3296/* 3297 * A interface to enable slab creation on nodeid 3298 */ 3299static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3300 int nodeid) 3301{ 3302 struct list_head *entry; 3303 struct slab *slabp; 3304 struct kmem_list3 *l3; 3305 void *obj; 3306 int x; 3307 3308 l3 = cachep->nodelists[nodeid]; 3309 BUG_ON(!l3); 3310 3311retry: 3312 check_irq_off(); 3313 spin_lock(&l3->list_lock); 3314 entry = l3->slabs_partial.next; 3315 if (entry == &l3->slabs_partial) { 3316 l3->free_touched = 1; 3317 entry = l3->slabs_free.next; 3318 if (entry == &l3->slabs_free) 3319 goto must_grow; 3320 } 3321 3322 slabp = list_entry(entry, struct slab, list); 3323 check_spinlock_acquired_node(cachep, nodeid); 3324 check_slabp(cachep, slabp); 3325 3326 STATS_INC_NODEALLOCS(cachep); 3327 STATS_INC_ACTIVE(cachep); 3328 STATS_SET_HIGH(cachep); 3329 3330 BUG_ON(slabp->inuse == cachep->num); 3331 3332 obj = slab_get_obj(cachep, slabp, nodeid); 3333 check_slabp(cachep, slabp); 3334 l3->free_objects--; 3335 /* move slabp to correct slabp list: */ 3336 list_del(&slabp->list); 3337 3338 if (slabp->free == BUFCTL_END) 3339 list_add(&slabp->list, &l3->slabs_full); 3340 else 3341 list_add(&slabp->list, &l3->slabs_partial); 3342 3343 spin_unlock(&l3->list_lock); 3344 goto done; 3345 3346must_grow: 3347 spin_unlock(&l3->list_lock); 3348 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); 3349 if (x) 3350 goto retry; 3351 3352 return fallback_alloc(cachep, flags); 3353 3354done: 3355 return obj; 3356} 3357 3358/** 3359 * kmem_cache_alloc_node - Allocate an object on the specified node 3360 * @cachep: The cache to allocate from. 3361 * @flags: See kmalloc(). 3362 * @nodeid: node number of the target node. 3363 * @caller: return address of caller, used for debug information 3364 * 3365 * Identical to kmem_cache_alloc but it will allocate memory on the given 3366 * node, which can improve the performance for cpu bound structures. 3367 * 3368 * Fallback to other node is possible if __GFP_THISNODE is not set. 3369 */ 3370static __always_inline void * 3371__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3372 void *caller) 3373{ 3374 unsigned long save_flags; 3375 void *ptr; 3376 3377 cache_alloc_debugcheck_before(cachep, flags); 3378 local_irq_save(save_flags); 3379 3380 if (unlikely(nodeid == -1)) 3381 nodeid = numa_node_id(); 3382 3383 if (unlikely(!cachep->nodelists[nodeid])) { 3384 /* Node not bootstrapped yet */ 3385 ptr = fallback_alloc(cachep, flags); 3386 goto out; 3387 } 3388 3389 if (nodeid == numa_node_id()) { 3390 /* 3391 * Use the locally cached objects if possible. 3392 * However ____cache_alloc does not allow fallback 3393 * to other nodes. It may fail while we still have 3394 * objects on other nodes available. 3395 */ 3396 ptr = ____cache_alloc(cachep, flags); 3397 if (ptr) 3398 goto out; 3399 } 3400 /* ___cache_alloc_node can fall back to other nodes */ 3401 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3402 out: 3403 local_irq_restore(save_flags); 3404 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3405 3406 return ptr; 3407} 3408 3409static __always_inline void * 3410__do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3411{ 3412 void *objp; 3413 3414 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { 3415 objp = alternate_node_alloc(cache, flags); 3416 if (objp) 3417 goto out; 3418 } 3419 objp = ____cache_alloc(cache, flags); 3420 3421 /* 3422 * We may just have run out of memory on the local node. 3423 * ____cache_alloc_node() knows how to locate memory on other nodes 3424 */ 3425 if (!objp) 3426 objp = ____cache_alloc_node(cache, flags, numa_node_id()); 3427 3428 out: 3429 return objp; 3430} 3431#else 3432 3433static __always_inline void * 3434__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3435{ 3436 return ____cache_alloc(cachep, flags); 3437} 3438 3439#endif /* CONFIG_NUMA */ 3440 3441static __always_inline void * 3442__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) 3443{ 3444 unsigned long save_flags; 3445 void *objp; 3446 3447 cache_alloc_debugcheck_before(cachep, flags); 3448 local_irq_save(save_flags); 3449 objp = __do_cache_alloc(cachep, flags); 3450 local_irq_restore(save_flags); 3451 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3452 prefetchw(objp); 3453 3454 return objp; 3455} 3456 3457/* 3458 * Caller needs to acquire correct kmem_list's list_lock 3459 */ 3460static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3461 int node) 3462{ 3463 int i; 3464 struct kmem_list3 *l3; 3465 3466 for (i = 0; i < nr_objects; i++) { 3467 void *objp = objpp[i]; 3468 struct slab *slabp; 3469 3470 slabp = virt_to_slab(objp); 3471 l3 = cachep->nodelists[node]; 3472 list_del(&slabp->list); 3473 check_spinlock_acquired_node(cachep, node); 3474 check_slabp(cachep, slabp); 3475 slab_put_obj(cachep, slabp, objp, node); 3476 STATS_DEC_ACTIVE(cachep); 3477 l3->free_objects++; 3478 check_slabp(cachep, slabp); 3479 3480 /* fixup slab chains */ 3481 if (slabp->inuse == 0) { 3482 if (l3->free_objects > l3->free_limit) { 3483 l3->free_objects -= cachep->num; 3484 /* No need to drop any previously held 3485 * lock here, even if we have a off-slab slab 3486 * descriptor it is guaranteed to come from 3487 * a different cache, refer to comments before 3488 * alloc_slabmgmt. 3489 */ 3490 slab_destroy(cachep, slabp); 3491 } else { 3492 list_add(&slabp->list, &l3->slabs_free); 3493 } 3494 } else { 3495 /* Unconditionally move a slab to the end of the 3496 * partial list on free - maximum time for the 3497 * other objects to be freed, too. 3498 */ 3499 list_add_tail(&slabp->list, &l3->slabs_partial); 3500 } 3501 } 3502} 3503 3504static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3505{ 3506 int batchcount; 3507 struct kmem_list3 *l3; 3508 int node = numa_node_id(); 3509 3510 batchcount = ac->batchcount; 3511#if DEBUG 3512 BUG_ON(!batchcount || batchcount > ac->avail); 3513#endif 3514 check_irq_off(); 3515 l3 = cachep->nodelists[node]; 3516 spin_lock(&l3->list_lock); 3517 if (l3->shared) { 3518 struct array_cache *shared_array = l3->shared; 3519 int max = shared_array->limit - shared_array->avail; 3520 if (max) { 3521 if (batchcount > max) 3522 batchcount = max; 3523 memcpy(&(shared_array->entry[shared_array->avail]), 3524 ac->entry, sizeof(void *) * batchcount); 3525 shared_array->avail += batchcount; 3526 goto free_done; 3527 } 3528 } 3529 3530 free_block(cachep, ac->entry, batchcount, node); 3531free_done: 3532#if STATS 3533 { 3534 int i = 0; 3535 struct list_head *p; 3536 3537 p = l3->slabs_free.next; 3538 while (p != &(l3->slabs_free)) { 3539 struct slab *slabp; 3540 3541 slabp = list_entry(p, struct slab, list); 3542 BUG_ON(slabp->inuse); 3543 3544 i++; 3545 p = p->next; 3546 } 3547 STATS_SET_FREEABLE(cachep, i); 3548 } 3549#endif 3550 spin_unlock(&l3->list_lock); 3551 ac->avail -= batchcount; 3552 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3553} 3554 3555/* 3556 * Release an obj back to its cache. If the obj has a constructed state, it must 3557 * be in this state _before_ it is released. Called with disabled ints. 3558 */ 3559static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3560{ 3561 struct array_cache *ac = cpu_cache_get(cachep); 3562 3563 check_irq_off(); 3564 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3565 3566 if (cache_free_alien(cachep, objp)) 3567 return; 3568 3569 if (likely(ac->avail < ac->limit)) { 3570 STATS_INC_FREEHIT(cachep); 3571 ac->entry[ac->avail++] = objp; 3572 return; 3573 } else { 3574 STATS_INC_FREEMISS(cachep); 3575 cache_flusharray(cachep, ac); 3576 ac->entry[ac->avail++] = objp; 3577 } 3578} 3579 3580/** 3581 * kmem_cache_alloc - Allocate an object 3582 * @cachep: The cache to allocate from. 3583 * @flags: See kmalloc(). 3584 * 3585 * Allocate an object from this cache. The flags are only relevant 3586 * if the cache has no available objects. 3587 */ 3588void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3589{ 3590 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3591} 3592EXPORT_SYMBOL(kmem_cache_alloc); 3593 3594/** 3595 * kmem_cache_zalloc - Allocate an object. The memory is set to zero. 3596 * @cache: The cache to allocate from. 3597 * @flags: See kmalloc(). 3598 * 3599 * Allocate an object from this cache and set the allocated memory to zero. 3600 * The flags are only relevant if the cache has no available objects. 3601 */ 3602void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags) 3603{ 3604 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0)); 3605 if (ret) 3606 memset(ret, 0, obj_size(cache)); 3607 return ret; 3608} 3609EXPORT_SYMBOL(kmem_cache_zalloc); 3610 3611/** 3612 * kmem_ptr_validate - check if an untrusted pointer might 3613 * be a slab entry. 3614 * @cachep: the cache we're checking against 3615 * @ptr: pointer to validate 3616 * 3617 * This verifies that the untrusted pointer looks sane: 3618 * it is _not_ a guarantee that the pointer is actually 3619 * part of the slab cache in question, but it at least 3620 * validates that the pointer can be dereferenced and 3621 * looks half-way sane. 3622 * 3623 * Currently only used for dentry validation. 3624 */ 3625int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) 3626{ 3627 unsigned long addr = (unsigned long)ptr; 3628 unsigned long min_addr = PAGE_OFFSET; 3629 unsigned long align_mask = BYTES_PER_WORD - 1; 3630 unsigned long size = cachep->buffer_size; 3631 struct page *page; 3632 3633 if (unlikely(addr < min_addr)) 3634 goto out; 3635 if (unlikely(addr > (unsigned long)high_memory - size)) 3636 goto out; 3637 if (unlikely(addr & align_mask)) 3638 goto out; 3639 if (unlikely(!kern_addr_valid(addr))) 3640 goto out; 3641 if (unlikely(!kern_addr_valid(addr + size - 1))) 3642 goto out; 3643 page = virt_to_page(ptr); 3644 if (unlikely(!PageSlab(page))) 3645 goto out; 3646 if (unlikely(page_get_cache(page) != cachep)) 3647 goto out; 3648 return 1; 3649out: 3650 return 0; 3651} 3652 3653#ifdef CONFIG_NUMA 3654void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3655{ 3656 return __cache_alloc_node(cachep, flags, nodeid, 3657 __builtin_return_address(0)); 3658} 3659EXPORT_SYMBOL(kmem_cache_alloc_node); 3660 3661static __always_inline void * 3662__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3663{ 3664 struct kmem_cache *cachep; 3665 3666 cachep = kmem_find_general_cachep(size, flags); 3667 if (unlikely(cachep == NULL)) 3668 return NULL; 3669 return kmem_cache_alloc_node(cachep, flags, node); 3670} 3671 3672#ifdef CONFIG_DEBUG_SLAB 3673void *__kmalloc_node(size_t size, gfp_t flags, int node) 3674{ 3675 return __do_kmalloc_node(size, flags, node, 3676 __builtin_return_address(0)); 3677} 3678EXPORT_SYMBOL(__kmalloc_node); 3679 3680void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3681 int node, void *caller) 3682{ 3683 return __do_kmalloc_node(size, flags, node, caller); 3684} 3685EXPORT_SYMBOL(__kmalloc_node_track_caller); 3686#else 3687void *__kmalloc_node(size_t size, gfp_t flags, int node) 3688{ 3689 return __do_kmalloc_node(size, flags, node, NULL); 3690} 3691EXPORT_SYMBOL(__kmalloc_node); 3692#endif /* CONFIG_DEBUG_SLAB */ 3693#endif /* CONFIG_NUMA */ 3694 3695/** 3696 * __do_kmalloc - allocate memory 3697 * @size: how many bytes of memory are required. 3698 * @flags: the type of memory to allocate (see kmalloc). 3699 * @caller: function caller for debug tracking of the caller 3700 */ 3701static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3702 void *caller) 3703{ 3704 struct kmem_cache *cachep; 3705 3706 /* If you want to save a few bytes .text space: replace 3707 * __ with kmem_. 3708 * Then kmalloc uses the uninlined functions instead of the inline 3709 * functions. 3710 */ 3711 cachep = __find_general_cachep(size, flags); 3712 if (unlikely(cachep == NULL)) 3713 return NULL; 3714 return __cache_alloc(cachep, flags, caller); 3715} 3716 3717 3718#ifdef CONFIG_DEBUG_SLAB 3719void *__kmalloc(size_t size, gfp_t flags) 3720{ 3721 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3722} 3723EXPORT_SYMBOL(__kmalloc); 3724 3725void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3726{ 3727 return __do_kmalloc(size, flags, caller); 3728} 3729EXPORT_SYMBOL(__kmalloc_track_caller); 3730 3731#else 3732void *__kmalloc(size_t size, gfp_t flags) 3733{ 3734 return __do_kmalloc(size, flags, NULL); 3735} 3736EXPORT_SYMBOL(__kmalloc); 3737#endif 3738 3739/** 3740 * kmem_cache_free - Deallocate an object 3741 * @cachep: The cache the allocation was from. 3742 * @objp: The previously allocated object. 3743 * 3744 * Free an object which was previously allocated from this 3745 * cache. 3746 */ 3747void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3748{ 3749 unsigned long flags; 3750 3751 BUG_ON(virt_to_cache(objp) != cachep); 3752 3753 local_irq_save(flags); 3754 debug_check_no_locks_freed(objp, obj_size(cachep)); 3755 __cache_free(cachep, objp); 3756 local_irq_restore(flags); 3757} 3758EXPORT_SYMBOL(kmem_cache_free); 3759 3760/** 3761 * kfree - free previously allocated memory 3762 * @objp: pointer returned by kmalloc. 3763 * 3764 * If @objp is NULL, no operation is performed. 3765 * 3766 * Don't free memory not originally allocated by kmalloc() 3767 * or you will run into trouble. 3768 */ 3769void kfree(const void *objp) 3770{ 3771 struct kmem_cache *c; 3772 unsigned long flags; 3773 3774 if (unlikely(!objp)) 3775 return; 3776 local_irq_save(flags); 3777 kfree_debugcheck(objp); 3778 c = virt_to_cache(objp); 3779 debug_check_no_locks_freed(objp, obj_size(c)); 3780 __cache_free(c, (void *)objp); 3781 local_irq_restore(flags); 3782} 3783EXPORT_SYMBOL(kfree); 3784 3785unsigned int kmem_cache_size(struct kmem_cache *cachep) 3786{ 3787 return obj_size(cachep); 3788} 3789EXPORT_SYMBOL(kmem_cache_size); 3790 3791const char *kmem_cache_name(struct kmem_cache *cachep) 3792{ 3793 return cachep->name; 3794} 3795EXPORT_SYMBOL_GPL(kmem_cache_name); 3796 3797/* 3798 * This initializes kmem_list3 or resizes varioius caches for all nodes. 3799 */ 3800static int alloc_kmemlist(struct kmem_cache *cachep) 3801{ 3802 int node; 3803 struct kmem_list3 *l3; 3804 struct array_cache *new_shared; 3805 struct array_cache **new_alien = NULL; 3806 3807 for_each_online_node(node) { 3808 3809 if (use_alien_caches) { 3810 new_alien = alloc_alien_cache(node, cachep->limit); 3811 if (!new_alien) 3812 goto fail; 3813 } 3814 3815 new_shared = alloc_arraycache(node, 3816 cachep->shared*cachep->batchcount, 3817 0xbaadf00d); 3818 if (!new_shared) { 3819 free_alien_cache(new_alien); 3820 goto fail; 3821 } 3822 3823 l3 = cachep->nodelists[node]; 3824 if (l3) { 3825 struct array_cache *shared = l3->shared; 3826 3827 spin_lock_irq(&l3->list_lock); 3828 3829 if (shared) 3830 free_block(cachep, shared->entry, 3831 shared->avail, node); 3832 3833 l3->shared = new_shared; 3834 if (!l3->alien) { 3835 l3->alien = new_alien; 3836 new_alien = NULL; 3837 } 3838 l3->free_limit = (1 + nr_cpus_node(node)) * 3839 cachep->batchcount + cachep->num; 3840 spin_unlock_irq(&l3->list_lock); 3841 kfree(shared); 3842 free_alien_cache(new_alien); 3843 continue; 3844 } 3845 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3846 if (!l3) { 3847 free_alien_cache(new_alien); 3848 kfree(new_shared); 3849 goto fail; 3850 } 3851 3852 kmem_list3_init(l3); 3853 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3854 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3855 l3->shared = new_shared; 3856 l3->alien = new_alien; 3857 l3->free_limit = (1 + nr_cpus_node(node)) * 3858 cachep->batchcount + cachep->num; 3859 cachep->nodelists[node] = l3; 3860 } 3861 return 0; 3862 3863fail: 3864 if (!cachep->next.next) { 3865 /* Cache is not active yet. Roll back what we did */ 3866 node--; 3867 while (node >= 0) { 3868 if (cachep->nodelists[node]) { 3869 l3 = cachep->nodelists[node]; 3870 3871 kfree(l3->shared); 3872 free_alien_cache(l3->alien); 3873 kfree(l3); 3874 cachep->nodelists[node] = NULL; 3875 } 3876 node--; 3877 } 3878 } 3879 return -ENOMEM; 3880} 3881 3882struct ccupdate_struct { 3883 struct kmem_cache *cachep; 3884 struct array_cache *new[NR_CPUS]; 3885}; 3886 3887static void do_ccupdate_local(void *info) 3888{ 3889 struct ccupdate_struct *new = info; 3890 struct array_cache *old; 3891 3892 check_irq_off(); 3893 old = cpu_cache_get(new->cachep); 3894 3895 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3896 new->new[smp_processor_id()] = old; 3897} 3898 3899/* Always called with the cache_chain_mutex held */ 3900static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3901 int batchcount, int shared) 3902{ 3903 struct ccupdate_struct *new; 3904 int i; 3905 3906 new = kzalloc(sizeof(*new), GFP_KERNEL); 3907 if (!new) 3908 return -ENOMEM; 3909 3910 for_each_online_cpu(i) { 3911 new->new[i] = alloc_arraycache(cpu_to_node(i), limit, 3912 batchcount); 3913 if (!new->new[i]) { 3914 for (i--; i >= 0; i--) 3915 kfree(new->new[i]); 3916 kfree(new); 3917 return -ENOMEM; 3918 } 3919 } 3920 new->cachep = cachep; 3921 3922 on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); 3923 3924 check_irq_on(); 3925 cachep->batchcount = batchcount; 3926 cachep->limit = limit; 3927 cachep->shared = shared; 3928 3929 for_each_online_cpu(i) { 3930 struct array_cache *ccold = new->new[i]; 3931 if (!ccold) 3932 continue; 3933 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3934 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3935 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3936 kfree(ccold); 3937 } 3938 kfree(new); 3939 return alloc_kmemlist(cachep); 3940} 3941 3942/* Called with cache_chain_mutex held always */ 3943static int enable_cpucache(struct kmem_cache *cachep) 3944{ 3945 int err; 3946 int limit, shared; 3947 3948 /* 3949 * The head array serves three purposes: 3950 * - create a LIFO ordering, i.e. return objects that are cache-warm 3951 * - reduce the number of spinlock operations. 3952 * - reduce the number of linked list operations on the slab and 3953 * bufctl chains: array operations are cheaper. 3954 * The numbers are guessed, we should auto-tune as described by 3955 * Bonwick. 3956 */ 3957 if (cachep->buffer_size > 131072) 3958 limit = 1; 3959 else if (cachep->buffer_size > PAGE_SIZE) 3960 limit = 8; 3961 else if (cachep->buffer_size > 1024) 3962 limit = 24; 3963 else if (cachep->buffer_size > 256) 3964 limit = 54; 3965 else 3966 limit = 120; 3967 3968 /* 3969 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3970 * allocation behaviour: Most allocs on one cpu, most free operations 3971 * on another cpu. For these cases, an efficient object passing between 3972 * cpus is necessary. This is provided by a shared array. The array 3973 * replaces Bonwick's magazine layer. 3974 * On uniprocessor, it's functionally equivalent (but less efficient) 3975 * to a larger limit. Thus disabled by default. 3976 */ 3977 shared = 0; 3978#ifdef CONFIG_SMP 3979 if (cachep->buffer_size <= PAGE_SIZE) 3980 shared = 8; 3981#endif 3982 3983#if DEBUG 3984 /* 3985 * With debugging enabled, large batchcount lead to excessively long 3986 * periods with disabled local interrupts. Limit the batchcount 3987 */ 3988 if (limit > 32) 3989 limit = 32; 3990#endif 3991 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); 3992 if (err) 3993 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3994 cachep->name, -err); 3995 return err; 3996} 3997 3998/* 3999 * Drain an array if it contains any elements taking the l3 lock only if 4000 * necessary. Note that the l3 listlock also protects the array_cache 4001 * if drain_array() is used on the shared array. 4002 */ 4003void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 4004 struct array_cache *ac, int force, int node) 4005{ 4006 int tofree; 4007 4008 if (!ac || !ac->avail) 4009 return; 4010 if (ac->touched && !force) { 4011 ac->touched = 0; 4012 } else { 4013 spin_lock_irq(&l3->list_lock); 4014 if (ac->avail) { 4015 tofree = force ? ac->avail : (ac->limit + 4) / 5; 4016 if (tofree > ac->avail) 4017 tofree = (ac->avail + 1) / 2; 4018 free_block(cachep, ac->entry, tofree, node); 4019 ac->avail -= tofree; 4020 memmove(ac->entry, &(ac->entry[tofree]), 4021 sizeof(void *) * ac->avail); 4022 } 4023 spin_unlock_irq(&l3->list_lock); 4024 } 4025} 4026 4027/** 4028 * cache_reap - Reclaim memory from caches. 4029 * @w: work descriptor 4030 * 4031 * Called from workqueue/eventd every few seconds. 4032 * Purpose: 4033 * - clear the per-cpu caches for this CPU. 4034 * - return freeable pages to the main free memory pool. 4035 * 4036 * If we cannot acquire the cache chain mutex then just give up - we'll try 4037 * again on the next iteration. 4038 */ 4039static void cache_reap(struct work_struct *w) 4040{ 4041 struct kmem_cache *searchp; 4042 struct kmem_list3 *l3; 4043 int node = numa_node_id(); 4044 struct delayed_work *work = 4045 container_of(w, struct delayed_work, work); 4046 4047 if (!mutex_trylock(&cache_chain_mutex)) 4048 /* Give up. Setup the next iteration. */ 4049 goto out; 4050 4051 list_for_each_entry(searchp, &cache_chain, next) { 4052 check_irq_on(); 4053 4054 /* 4055 * We only take the l3 lock if absolutely necessary and we 4056 * have established with reasonable certainty that 4057 * we can do some work if the lock was obtained. 4058 */ 4059 l3 = searchp->nodelists[node]; 4060 4061 reap_alien(searchp, l3); 4062 4063 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); 4064 4065 /* 4066 * These are racy checks but it does not matter 4067 * if we skip one check or scan twice. 4068 */ 4069 if (time_after(l3->next_reap, jiffies)) 4070 goto next; 4071 4072 l3->next_reap = jiffies + REAPTIMEOUT_LIST3; 4073 4074 drain_array(searchp, l3, l3->shared, 0, node); 4075 4076 if (l3->free_touched) 4077 l3->free_touched = 0; 4078 else { 4079 int freed; 4080 4081 freed = drain_freelist(searchp, l3, (l3->free_limit + 4082 5 * searchp->num - 1) / (5 * searchp->num)); 4083 STATS_ADD_REAPED(searchp, freed); 4084 } 4085next: 4086 cond_resched(); 4087 } 4088 check_irq_on(); 4089 mutex_unlock(&cache_chain_mutex); 4090 next_reap_node(); 4091 refresh_cpu_vm_stats(smp_processor_id()); 4092out: 4093 /* Set up the next iteration */ 4094 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC)); 4095} 4096 4097#ifdef CONFIG_PROC_FS 4098 4099static void print_slabinfo_header(struct seq_file *m) 4100{ 4101 /* 4102 * Output format version, so at least we can change it 4103 * without _too_ many complaints. 4104 */ 4105#if STATS 4106 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); 4107#else 4108 seq_puts(m, "slabinfo - version: 2.1\n"); 4109#endif 4110 seq_puts(m, "# name <active_objs> <num_objs> <objsize> " 4111 "<objperslab> <pagesperslab>"); 4112 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 4113 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 4114#if STATS 4115 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " 4116 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); 4117 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); 4118#endif 4119 seq_putc(m, '\n'); 4120} 4121 4122static void *s_start(struct seq_file *m, loff_t *pos) 4123{ 4124 loff_t n = *pos; 4125 struct list_head *p; 4126 4127 mutex_lock(&cache_chain_mutex); 4128 if (!n) 4129 print_slabinfo_header(m); 4130 p = cache_chain.next; 4131 while (n--) { 4132 p = p->next; 4133 if (p == &cache_chain) 4134 return NULL; 4135 } 4136 return list_entry(p, struct kmem_cache, next); 4137} 4138 4139static void *s_next(struct seq_file *m, void *p, loff_t *pos) 4140{ 4141 struct kmem_cache *cachep = p; 4142 ++*pos; 4143 return cachep->next.next == &cache_chain ? 4144 NULL : list_entry(cachep->next.next, struct kmem_cache, next); 4145} 4146 4147static void s_stop(struct seq_file *m, void *p) 4148{ 4149 mutex_unlock(&cache_chain_mutex); 4150} 4151 4152static int s_show(struct seq_file *m, void *p) 4153{ 4154 struct kmem_cache *cachep = p; 4155 struct slab *slabp; 4156 unsigned long active_objs; 4157 unsigned long num_objs; 4158 unsigned long active_slabs = 0; 4159 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 4160 const char *name; 4161 char *error = NULL; 4162 int node; 4163 struct kmem_list3 *l3; 4164 4165 active_objs = 0; 4166 num_slabs = 0; 4167 for_each_online_node(node) { 4168 l3 = cachep->nodelists[node]; 4169 if (!l3) 4170 continue; 4171 4172 check_irq_on(); 4173 spin_lock_irq(&l3->list_lock); 4174 4175 list_for_each_entry(slabp, &l3->slabs_full, list) { 4176 if (slabp->inuse != cachep->num && !error) 4177 error = "slabs_full accounting error"; 4178 active_objs += cachep->num; 4179 active_slabs++; 4180 } 4181 list_for_each_entry(slabp, &l3->slabs_partial, list) { 4182 if (slabp->inuse == cachep->num && !error) 4183 error = "slabs_partial inuse accounting error"; 4184 if (!slabp->inuse && !error) 4185 error = "slabs_partial/inuse accounting error"; 4186 active_objs += slabp->inuse; 4187 active_slabs++; 4188 } 4189 list_for_each_entry(slabp, &l3->slabs_free, list) { 4190 if (slabp->inuse && !error) 4191 error = "slabs_free/inuse accounting error"; 4192 num_slabs++; 4193 } 4194 free_objects += l3->free_objects; 4195 if (l3->shared) 4196 shared_avail += l3->shared->avail; 4197 4198 spin_unlock_irq(&l3->list_lock); 4199 } 4200 num_slabs += active_slabs; 4201 num_objs = num_slabs * cachep->num; 4202 if (num_objs - active_objs != free_objects && !error) 4203 error = "free_objects accounting error"; 4204 4205 name = cachep->name; 4206 if (error) 4207 printk(KERN_ERR "slab: cache %s error: %s\n", name, error); 4208 4209 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", 4210 name, active_objs, num_objs, cachep->buffer_size, 4211 cachep->num, (1 << cachep->gfporder)); 4212 seq_printf(m, " : tunables %4u %4u %4u", 4213 cachep->limit, cachep->batchcount, cachep->shared); 4214 seq_printf(m, " : slabdata %6lu %6lu %6lu", 4215 active_slabs, num_slabs, shared_avail); 4216#if STATS 4217 { /* list3 stats */ 4218 unsigned long high = cachep->high_mark; 4219 unsigned long allocs = cachep->num_allocations; 4220 unsigned long grown = cachep->grown; 4221 unsigned long reaped = cachep->reaped; 4222 unsigned long errors = cachep->errors; 4223 unsigned long max_freeable = cachep->max_freeable; 4224 unsigned long node_allocs = cachep->node_allocs; 4225 unsigned long node_frees = cachep->node_frees; 4226 unsigned long overflows = cachep->node_overflow; 4227 4228 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ 4229 %4lu %4lu %4lu %4lu %4lu", allocs, high, grown, 4230 reaped, errors, max_freeable, node_allocs, 4231 node_frees, overflows); 4232 } 4233 /* cpu stats */ 4234 { 4235 unsigned long allochit = atomic_read(&cachep->allochit); 4236 unsigned long allocmiss = atomic_read(&cachep->allocmiss); 4237 unsigned long freehit = atomic_read(&cachep->freehit); 4238 unsigned long freemiss = atomic_read(&cachep->freemiss); 4239 4240 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", 4241 allochit, allocmiss, freehit, freemiss); 4242 } 4243#endif 4244 seq_putc(m, '\n'); 4245 return 0; 4246} 4247 4248/* 4249 * slabinfo_op - iterator that generates /proc/slabinfo 4250 * 4251 * Output layout: 4252 * cache-name 4253 * num-active-objs 4254 * total-objs 4255 * object size 4256 * num-active-slabs 4257 * total-slabs 4258 * num-pages-per-slab 4259 * + further values on SMP and with statistics enabled 4260 */ 4261 4262const struct seq_operations slabinfo_op = { 4263 .start = s_start, 4264 .next = s_next, 4265 .stop = s_stop, 4266 .show = s_show, 4267}; 4268 4269#define MAX_SLABINFO_WRITE 128 4270/** 4271 * slabinfo_write - Tuning for the slab allocator 4272 * @file: unused 4273 * @buffer: user buffer 4274 * @count: data length 4275 * @ppos: unused 4276 */ 4277ssize_t slabinfo_write(struct file *file, const char __user * buffer, 4278 size_t count, loff_t *ppos) 4279{ 4280 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; 4281 int limit, batchcount, shared, res; 4282 struct kmem_cache *cachep; 4283 4284 if (count > MAX_SLABINFO_WRITE) 4285 return -EINVAL; 4286 if (copy_from_user(&kbuf, buffer, count)) 4287 return -EFAULT; 4288 kbuf[MAX_SLABINFO_WRITE] = '\0'; 4289 4290 tmp = strchr(kbuf, ' '); 4291 if (!tmp) 4292 return -EINVAL; 4293 *tmp = '\0'; 4294 tmp++; 4295 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) 4296 return -EINVAL; 4297 4298 /* Find the cache in the chain of caches. */ 4299 mutex_lock(&cache_chain_mutex); 4300 res = -EINVAL; 4301 list_for_each_entry(cachep, &cache_chain, next) { 4302 if (!strcmp(cachep->name, kbuf)) { 4303 if (limit < 1 || batchcount < 1 || 4304 batchcount > limit || shared < 0) { 4305 res = 0; 4306 } else { 4307 res = do_tune_cpucache(cachep, limit, 4308 batchcount, shared); 4309 } 4310 break; 4311 } 4312 } 4313 mutex_unlock(&cache_chain_mutex); 4314 if (res >= 0) 4315 res = count; 4316 return res; 4317} 4318 4319#ifdef CONFIG_DEBUG_SLAB_LEAK 4320 4321static void *leaks_start(struct seq_file *m, loff_t *pos) 4322{ 4323 loff_t n = *pos; 4324 struct list_head *p; 4325 4326 mutex_lock(&cache_chain_mutex); 4327 p = cache_chain.next; 4328 while (n--) { 4329 p = p->next; 4330 if (p == &cache_chain) 4331 return NULL; 4332 } 4333 return list_entry(p, struct kmem_cache, next); 4334} 4335 4336static inline int add_caller(unsigned long *n, unsigned long v) 4337{ 4338 unsigned long *p; 4339 int l; 4340 if (!v) 4341 return 1; 4342 l = n[1]; 4343 p = n + 2; 4344 while (l) { 4345 int i = l/2; 4346 unsigned long *q = p + 2 * i; 4347 if (*q == v) { 4348 q[1]++; 4349 return 1; 4350 } 4351 if (*q > v) { 4352 l = i; 4353 } else { 4354 p = q + 2; 4355 l -= i + 1; 4356 } 4357 } 4358 if (++n[1] == n[0]) 4359 return 0; 4360 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); 4361 p[0] = v; 4362 p[1] = 1; 4363 return 1; 4364} 4365 4366static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) 4367{ 4368 void *p; 4369 int i; 4370 if (n[0] == n[1]) 4371 return; 4372 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { 4373 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) 4374 continue; 4375 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) 4376 return; 4377 } 4378} 4379 4380static void show_symbol(struct seq_file *m, unsigned long address) 4381{ 4382#ifdef CONFIG_KALLSYMS 4383 char *modname; 4384 const char *name; 4385 unsigned long offset, size; 4386 char namebuf[KSYM_NAME_LEN+1]; 4387 4388 name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); 4389 4390 if (name) { 4391 seq_printf(m, "%s+%#lx/%#lx", name, offset, size); 4392 if (modname) 4393 seq_printf(m, " [%s]", modname); 4394 return; 4395 } 4396#endif 4397 seq_printf(m, "%p", (void *)address); 4398} 4399 4400static int leaks_show(struct seq_file *m, void *p) 4401{ 4402 struct kmem_cache *cachep = p; 4403 struct slab *slabp; 4404 struct kmem_list3 *l3; 4405 const char *name; 4406 unsigned long *n = m->private; 4407 int node; 4408 int i; 4409 4410 if (!(cachep->flags & SLAB_STORE_USER)) 4411 return 0; 4412 if (!(cachep->flags & SLAB_RED_ZONE)) 4413 return 0; 4414 4415 /* OK, we can do it */ 4416 4417 n[1] = 0; 4418 4419 for_each_online_node(node) { 4420 l3 = cachep->nodelists[node]; 4421 if (!l3) 4422 continue; 4423 4424 check_irq_on(); 4425 spin_lock_irq(&l3->list_lock); 4426 4427 list_for_each_entry(slabp, &l3->slabs_full, list) 4428 handle_slab(n, cachep, slabp); 4429 list_for_each_entry(slabp, &l3->slabs_partial, list) 4430 handle_slab(n, cachep, slabp); 4431 spin_unlock_irq(&l3->list_lock); 4432 } 4433 name = cachep->name; 4434 if (n[0] == n[1]) { 4435 /* Increase the buffer size */ 4436 mutex_unlock(&cache_chain_mutex); 4437 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); 4438 if (!m->private) { 4439 /* Too bad, we are really out */ 4440 m->private = n; 4441 mutex_lock(&cache_chain_mutex); 4442 return -ENOMEM; 4443 } 4444 *(unsigned long *)m->private = n[0] * 2; 4445 kfree(n); 4446 mutex_lock(&cache_chain_mutex); 4447 /* Now make sure this entry will be retried */ 4448 m->count = m->size; 4449 return 0; 4450 } 4451 for (i = 0; i < n[1]; i++) { 4452 seq_printf(m, "%s: %lu ", name, n[2*i+3]); 4453 show_symbol(m, n[2*i+2]); 4454 seq_putc(m, '\n'); 4455 } 4456 4457 return 0; 4458} 4459 4460const struct seq_operations slabstats_op = { 4461 .start = leaks_start, 4462 .next = s_next, 4463 .stop = s_stop, 4464 .show = leaks_show, 4465}; 4466#endif 4467#endif 4468 4469/** 4470 * ksize - get the actual amount of memory allocated for a given object 4471 * @objp: Pointer to the object 4472 * 4473 * kmalloc may internally round up allocations and return more memory 4474 * than requested. ksize() can be used to determine the actual amount of 4475 * memory allocated. The caller may use this additional memory, even though 4476 * a smaller amount of memory was initially specified with the kmalloc call. 4477 * The caller must guarantee that objp points to a valid object previously 4478 * allocated with either kmalloc() or kmem_cache_alloc(). The object 4479 * must not be freed during the duration of the call. 4480 */ 4481unsigned int ksize(const void *objp) 4482{ 4483 if (unlikely(objp == NULL)) 4484 return 0; 4485 4486 return obj_size(virt_to_cache(objp)); 4487} 4488