slab.c revision 0718dc2a82c865ca75975acabaf984057f9fd488
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs;
 * otherwise objects come from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change; they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif
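/*
 * Illustrative example of the cache API described above; "struct foo",
 * foo_ctor() and foo_cache are hypothetical and not part of this file.
 * The constructor runs only when a new slab is allocated, so objects
 * handed back via kmem_cache_free() must still be in their constructed
 * state:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *			     unsigned long flags)
 *	{
 *		struct foo *f = obj;
 *
 *		spin_lock_init(&f->lock);
 *		INIT_LIST_HEAD(&f->list);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */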
/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab via linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/*
 * Max number of objs-per-slab for caches which use off-slab slabs.
 * Needed to avoid a possible looping condition in cache_grow().
 */
static unsigned long offslab_limit;

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU. This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking. We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
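 * Each cache keeps one of these per node (cachep->nodelists[node]).
 * Roughly, an allocation that misses the cpu's array_cache is refilled
 * from the node's shared array, then from a slab on slabs_partial, then
 * from one on slabs_free, and only then by growing the cache with a
 * fresh slab.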
291 */ 292struct kmem_list3 { 293 struct list_head slabs_partial; /* partial list first, better asm code */ 294 struct list_head slabs_full; 295 struct list_head slabs_free; 296 unsigned long free_objects; 297 unsigned int free_limit; 298 unsigned int colour_next; /* Per-node cache coloring */ 299 spinlock_t list_lock; 300 struct array_cache *shared; /* shared per node */ 301 struct array_cache **alien; /* on other nodes */ 302 unsigned long next_reap; /* updated without locking */ 303 int free_touched; /* updated without locking */ 304}; 305 306/* 307 * Need this for bootstrapping a per node allocator. 308 */ 309#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) 310struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 311#define CACHE_CACHE 0 312#define SIZE_AC 1 313#define SIZE_L3 (1 + MAX_NUMNODES) 314 315/* 316 * This function must be completely optimized away if a constant is passed to 317 * it. Mostly the same as what is in linux/slab.h except it returns an index. 318 */ 319static __always_inline int index_of(const size_t size) 320{ 321 extern void __bad_size(void); 322 323 if (__builtin_constant_p(size)) { 324 int i = 0; 325 326#define CACHE(x) \ 327 if (size <=x) \ 328 return i; \ 329 else \ 330 i++; 331#include "linux/kmalloc_sizes.h" 332#undef CACHE 333 __bad_size(); 334 } else 335 __bad_size(); 336 return 0; 337} 338 339#define INDEX_AC index_of(sizeof(struct arraycache_init)) 340#define INDEX_L3 index_of(sizeof(struct kmem_list3)) 341 342static void kmem_list3_init(struct kmem_list3 *parent) 343{ 344 INIT_LIST_HEAD(&parent->slabs_full); 345 INIT_LIST_HEAD(&parent->slabs_partial); 346 INIT_LIST_HEAD(&parent->slabs_free); 347 parent->shared = NULL; 348 parent->alien = NULL; 349 parent->colour_next = 0; 350 spin_lock_init(&parent->list_lock); 351 parent->free_objects = 0; 352 parent->free_touched = 0; 353} 354 355#define MAKE_LIST(cachep, listp, slab, nodeid) \ 356 do { \ 357 INIT_LIST_HEAD(listp); \ 358 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 359 } while (0) 360 361#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 362 do { \ 363 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 364 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 365 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 366 } while (0) 367 368/* 369 * struct kmem_cache 370 * 371 * manages a cache. 372 */ 373 374struct kmem_cache { 375/* 1) per-cpu data, touched during every alloc/free */ 376 struct array_cache *array[NR_CPUS]; 377/* 2) Cache tunables. Protected by cache_chain_mutex */ 378 unsigned int batchcount; 379 unsigned int limit; 380 unsigned int shared; 381 382 unsigned int buffer_size; 383/* 3) touched by every alloc & free from the backend */ 384 struct kmem_list3 *nodelists[MAX_NUMNODES]; 385 386 unsigned int flags; /* constant flags */ 387 unsigned int num; /* # of objs per slab */ 388 389/* 4) cache_grow/shrink */ 390 /* order of pgs per slab (2^n) */ 391 unsigned int gfporder; 392 393 /* force GFP flags, e.g. 
GFP_DMA */ 394 gfp_t gfpflags; 395 396 size_t colour; /* cache colouring range */ 397 unsigned int colour_off; /* colour offset */ 398 struct kmem_cache *slabp_cache; 399 unsigned int slab_size; 400 unsigned int dflags; /* dynamic flags */ 401 402 /* constructor func */ 403 void (*ctor) (void *, struct kmem_cache *, unsigned long); 404 405 /* de-constructor func */ 406 void (*dtor) (void *, struct kmem_cache *, unsigned long); 407 408/* 5) cache creation/removal */ 409 const char *name; 410 struct list_head next; 411 412/* 6) statistics */ 413#if STATS 414 unsigned long num_active; 415 unsigned long num_allocations; 416 unsigned long high_mark; 417 unsigned long grown; 418 unsigned long reaped; 419 unsigned long errors; 420 unsigned long max_freeable; 421 unsigned long node_allocs; 422 unsigned long node_frees; 423 atomic_t allochit; 424 atomic_t allocmiss; 425 atomic_t freehit; 426 atomic_t freemiss; 427#endif 428#if DEBUG 429 /* 430 * If debugging is enabled, then the allocator can add additional 431 * fields and/or padding to every object. buffer_size contains the total 432 * object size including these internal fields, the following two 433 * variables contain the offset to the user object and its size. 434 */ 435 int obj_offset; 436 int obj_size; 437#endif 438}; 439 440#define CFLGS_OFF_SLAB (0x80000000UL) 441#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 442 443#define BATCHREFILL_LIMIT 16 444/* 445 * Optimization question: fewer reaps means less probability for unnessary 446 * cpucache drain/refill cycles. 447 * 448 * OTOH the cpuarrays can contain lots of objects, 449 * which could lock up otherwise freeable slabs. 450 */ 451#define REAPTIMEOUT_CPUC (2*HZ) 452#define REAPTIMEOUT_LIST3 (4*HZ) 453 454#if STATS 455#define STATS_INC_ACTIVE(x) ((x)->num_active++) 456#define STATS_DEC_ACTIVE(x) ((x)->num_active--) 457#define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 458#define STATS_INC_GROWN(x) ((x)->grown++) 459#define STATS_INC_REAPED(x) ((x)->reaped++) 460#define STATS_SET_HIGH(x) \ 461 do { \ 462 if ((x)->num_active > (x)->high_mark) \ 463 (x)->high_mark = (x)->num_active; \ 464 } while (0) 465#define STATS_INC_ERR(x) ((x)->errors++) 466#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 467#define STATS_INC_NODEFREES(x) ((x)->node_frees++) 468#define STATS_SET_FREEABLE(x, i) \ 469 do { \ 470 if ((x)->max_freeable < i) \ 471 (x)->max_freeable = i; \ 472 } while (0) 473#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 474#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 475#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 476#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 477#else 478#define STATS_INC_ACTIVE(x) do { } while (0) 479#define STATS_DEC_ACTIVE(x) do { } while (0) 480#define STATS_INC_ALLOCED(x) do { } while (0) 481#define STATS_INC_GROWN(x) do { } while (0) 482#define STATS_INC_REAPED(x) do { } while (0) 483#define STATS_SET_HIGH(x) do { } while (0) 484#define STATS_INC_ERR(x) do { } while (0) 485#define STATS_INC_NODEALLOCS(x) do { } while (0) 486#define STATS_INC_NODEFREES(x) do { } while (0) 487#define STATS_SET_FREEABLE(x, i) do { } while (0) 488#define STATS_INC_ALLOCHIT(x) do { } while (0) 489#define STATS_INC_ALLOCMISS(x) do { } while (0) 490#define STATS_INC_FREEHIT(x) do { } while (0) 491#define STATS_INC_FREEMISS(x) do { } while (0) 492#endif 493 494#if DEBUG 495/* 496 * Magic nums for obj red zoning. 497 * Placed in the first word before and the first word after an obj. 
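 * While an object is allocated both words hold RED_ACTIVE; while it is
 * free they hold RED_INACTIVE. Seeing an unexpected value when an object
 * is handed out or freed points at an overrun of a neighbouring object
 * or a double free.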
498 */ 499#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ 500#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ 501 502/* ...and for poisoning */ 503#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ 504#define POISON_FREE 0x6b /* for use-after-free poisoning */ 505#define POISON_END 0xa5 /* end-byte of poisoning */ 506 507/* 508 * memory layout of objects: 509 * 0 : objp 510 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 511 * the end of an object is aligned with the end of the real 512 * allocation. Catches writes behind the end of the allocation. 513 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 514 * redzone word. 515 * cachep->obj_offset: The real object. 516 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 517 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address 518 * [BYTES_PER_WORD long] 519 */ 520static int obj_offset(struct kmem_cache *cachep) 521{ 522 return cachep->obj_offset; 523} 524 525static int obj_size(struct kmem_cache *cachep) 526{ 527 return cachep->obj_size; 528} 529 530static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 531{ 532 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 533 return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD); 534} 535 536static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 537{ 538 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 539 if (cachep->flags & SLAB_STORE_USER) 540 return (unsigned long *)(objp + cachep->buffer_size - 541 2 * BYTES_PER_WORD); 542 return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD); 543} 544 545static void **dbg_userword(struct kmem_cache *cachep, void *objp) 546{ 547 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 548 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); 549} 550 551#else 552 553#define obj_offset(x) 0 554#define obj_size(cachep) (cachep->buffer_size) 555#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 556#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 557#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 558 559#endif 560 561/* 562 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp 563 * order. 564 */ 565#if defined(CONFIG_LARGE_ALLOCS) 566#define MAX_OBJ_ORDER 13 /* up to 32Mb */ 567#define MAX_GFP_ORDER 13 /* up to 32Mb */ 568#elif defined(CONFIG_MMU) 569#define MAX_OBJ_ORDER 5 /* 32 pages */ 570#define MAX_GFP_ORDER 5 /* 32 pages */ 571#else 572#define MAX_OBJ_ORDER 8 /* up to 1Mb */ 573#define MAX_GFP_ORDER 8 /* up to 1Mb */ 574#endif 575 576/* 577 * Do not go above this order unless 0 objects fit into the slab. 578 */ 579#define BREAK_GFP_ORDER_HI 1 580#define BREAK_GFP_ORDER_LO 0 581static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; 582 583/* 584 * Functions for storing/retrieving the cachep and or slab from the page 585 * allocator. These are used to find the slab an obj belongs to. With kfree(), 586 * these are used to find the cache which an obj belongs to. 
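 *
 * Roughly, the lookup that kfree() and kmem_cache_free() rely on is:
 *
 *	struct page *page = virt_to_page(objp);
 *	struct kmem_cache *cachep = page_get_cache(page);
 *	struct slab *slabp = page_get_slab(page);
 *
 * The pointers are stashed in page->lru by page_set_cache() and
 * page_set_slab() when a slab's pages are set up.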
587 */ 588static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 589{ 590 page->lru.next = (struct list_head *)cache; 591} 592 593static inline struct kmem_cache *page_get_cache(struct page *page) 594{ 595 if (unlikely(PageCompound(page))) 596 page = (struct page *)page_private(page); 597 return (struct kmem_cache *)page->lru.next; 598} 599 600static inline void page_set_slab(struct page *page, struct slab *slab) 601{ 602 page->lru.prev = (struct list_head *)slab; 603} 604 605static inline struct slab *page_get_slab(struct page *page) 606{ 607 if (unlikely(PageCompound(page))) 608 page = (struct page *)page_private(page); 609 return (struct slab *)page->lru.prev; 610} 611 612static inline struct kmem_cache *virt_to_cache(const void *obj) 613{ 614 struct page *page = virt_to_page(obj); 615 return page_get_cache(page); 616} 617 618static inline struct slab *virt_to_slab(const void *obj) 619{ 620 struct page *page = virt_to_page(obj); 621 return page_get_slab(page); 622} 623 624static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, 625 unsigned int idx) 626{ 627 return slab->s_mem + cache->buffer_size * idx; 628} 629 630static inline unsigned int obj_to_index(struct kmem_cache *cache, 631 struct slab *slab, void *obj) 632{ 633 return (unsigned)(obj - slab->s_mem) / cache->buffer_size; 634} 635 636/* 637 * These are the default caches for kmalloc. Custom caches can have other sizes. 638 */ 639struct cache_sizes malloc_sizes[] = { 640#define CACHE(x) { .cs_size = (x) }, 641#include <linux/kmalloc_sizes.h> 642 CACHE(ULONG_MAX) 643#undef CACHE 644}; 645EXPORT_SYMBOL(malloc_sizes); 646 647/* Must match cache_sizes above. Out of line to keep cache footprint low. */ 648struct cache_names { 649 char *name; 650 char *name_dma; 651}; 652 653static struct cache_names __initdata cache_names[] = { 654#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 655#include <linux/kmalloc_sizes.h> 656 {NULL,} 657#undef CACHE 658}; 659 660static struct arraycache_init initarray_cache __initdata = 661 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 662static struct arraycache_init initarray_generic = 663 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 664 665/* internal cache of cache description objs */ 666static struct kmem_cache cache_cache = { 667 .batchcount = 1, 668 .limit = BOOT_CPUCACHE_ENTRIES, 669 .shared = 1, 670 .buffer_size = sizeof(struct kmem_cache), 671 .name = "kmem_cache", 672#if DEBUG 673 .obj_size = sizeof(struct kmem_cache), 674#endif 675}; 676 677/* Guard access to the cache-chain. */ 678static DEFINE_MUTEX(cache_chain_mutex); 679static struct list_head cache_chain; 680 681/* 682 * vm_enough_memory() looks at this to determine how many slab-allocated pages 683 * are possibly freeable under pressure 684 * 685 * SLAB_RECLAIM_ACCOUNT turns this on per-slab 686 */ 687atomic_t slab_reclaim_pages; 688 689/* 690 * chicken and egg problem: delay the per-cpu array allocation 691 * until the general caches are up. 
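 * g_cpucache_up below records how far the bootstrap has progressed:
 * NONE (no kmalloc caches yet), PARTIAL_AC (the cache backing struct
 * arraycache_init exists), PARTIAL_L3 (the cache backing struct
 * kmem_list3 exists as well) and FULL (kmem_cache_init() finished and
 * kmalloc() works normally). setup_cpu_cache() uses this to decide
 * between the static __initdata structures and kmalloc'ed ones.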
692 */ 693static enum { 694 NONE, 695 PARTIAL_AC, 696 PARTIAL_L3, 697 FULL 698} g_cpucache_up; 699 700static DEFINE_PER_CPU(struct work_struct, reap_work); 701 702static void free_block(struct kmem_cache *cachep, void **objpp, int len, 703 int node); 704static void enable_cpucache(struct kmem_cache *cachep); 705static void cache_reap(void *unused); 706static int __node_shrink(struct kmem_cache *cachep, int node); 707 708static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 709{ 710 return cachep->array[smp_processor_id()]; 711} 712 713static inline struct kmem_cache *__find_general_cachep(size_t size, 714 gfp_t gfpflags) 715{ 716 struct cache_sizes *csizep = malloc_sizes; 717 718#if DEBUG 719 /* This happens if someone tries to call 720 * kmem_cache_create(), or __kmalloc(), before 721 * the generic caches are initialized. 722 */ 723 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 724#endif 725 while (size > csizep->cs_size) 726 csizep++; 727 728 /* 729 * Really subtle: The last entry with cs->cs_size==ULONG_MAX 730 * has cs_{dma,}cachep==NULL. Thus no special case 731 * for large kmalloc calls required. 732 */ 733 if (unlikely(gfpflags & GFP_DMA)) 734 return csizep->cs_dmacachep; 735 return csizep->cs_cachep; 736} 737 738struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 739{ 740 return __find_general_cachep(size, gfpflags); 741} 742EXPORT_SYMBOL(kmem_find_general_cachep); 743 744static size_t slab_mgmt_size(size_t nr_objs, size_t align) 745{ 746 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 747} 748 749/* 750 * Calculate the number of objects and left-over bytes for a given buffer size. 751 */ 752static void cache_estimate(unsigned long gfporder, size_t buffer_size, 753 size_t align, int flags, size_t *left_over, 754 unsigned int *num) 755{ 756 int nr_objs; 757 size_t mgmt_size; 758 size_t slab_size = PAGE_SIZE << gfporder; 759 760 /* 761 * The slab management structure can be either off the slab or 762 * on it. For the latter case, the memory allocated for a 763 * slab is used for: 764 * 765 * - The struct slab 766 * - One kmem_bufctl_t for each object 767 * - Padding to respect alignment of @align 768 * - @buffer_size bytes for each object 769 * 770 * If the slab management structure is off the slab, then the 771 * alignment will already be calculated into the size. Because 772 * the slabs are all pages aligned, the objects will be at the 773 * correct alignment when allocated. 774 */ 775 if (flags & CFLGS_OFF_SLAB) { 776 mgmt_size = 0; 777 nr_objs = slab_size / buffer_size; 778 779 if (nr_objs > SLAB_LIMIT) 780 nr_objs = SLAB_LIMIT; 781 } else { 782 /* 783 * Ignore padding for the initial guess. The padding 784 * is at most @align-1 bytes, and @buffer_size is at 785 * least @align. In the worst case, this result will 786 * be one greater than the number of objects that fit 787 * into the memory allocation when taking the padding 788 * into account. 789 */ 790 nr_objs = (slab_size - sizeof(struct slab)) / 791 (buffer_size + sizeof(kmem_bufctl_t)); 792 793 /* 794 * This calculated number will be either the right 795 * amount, or one greater than what we want. 
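		 *
		 * Worked example with illustrative numbers: for a 4096 byte
		 * slab, buffer_size 128, align 32, sizeof(struct slab) 32 and
		 * sizeof(kmem_bufctl_t) 4, the guess is
		 * (4096 - 32) / (128 + 4) = 30. Then
		 * slab_mgmt_size(30, 32) = ALIGN(32 + 30*4, 32) = 160, and
		 * 160 + 30*128 = 4000 <= 4096, so the guess stands and
		 * 96 bytes are left over for colouring.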
796 */ 797 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size 798 > slab_size) 799 nr_objs--; 800 801 if (nr_objs > SLAB_LIMIT) 802 nr_objs = SLAB_LIMIT; 803 804 mgmt_size = slab_mgmt_size(nr_objs, align); 805 } 806 *num = nr_objs; 807 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 808} 809 810#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) 811 812static void __slab_error(const char *function, struct kmem_cache *cachep, 813 char *msg) 814{ 815 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 816 function, cachep->name, msg); 817 dump_stack(); 818} 819 820#ifdef CONFIG_NUMA 821/* 822 * Special reaping functions for NUMA systems called from cache_reap(). 823 * These take care of doing round robin flushing of alien caches (containing 824 * objects freed on different nodes from which they were allocated) and the 825 * flushing of remote pcps by calling drain_node_pages. 826 */ 827static DEFINE_PER_CPU(unsigned long, reap_node); 828 829static void init_reap_node(int cpu) 830{ 831 int node; 832 833 node = next_node(cpu_to_node(cpu), node_online_map); 834 if (node == MAX_NUMNODES) 835 node = first_node(node_online_map); 836 837 __get_cpu_var(reap_node) = node; 838} 839 840static void next_reap_node(void) 841{ 842 int node = __get_cpu_var(reap_node); 843 844 /* 845 * Also drain per cpu pages on remote zones 846 */ 847 if (node != numa_node_id()) 848 drain_node_pages(node); 849 850 node = next_node(node, node_online_map); 851 if (unlikely(node >= MAX_NUMNODES)) 852 node = first_node(node_online_map); 853 __get_cpu_var(reap_node) = node; 854} 855 856#else 857#define init_reap_node(cpu) do { } while (0) 858#define next_reap_node(void) do { } while (0) 859#endif 860 861/* 862 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 863 * via the workqueue/eventd. 864 * Add the CPU number into the expiration time to minimize the possibility of 865 * the CPUs getting into lockstep and contending for the global cache chain 866 * lock. 867 */ 868static void __devinit start_cpu_timer(int cpu) 869{ 870 struct work_struct *reap_work = &per_cpu(reap_work, cpu); 871 872 /* 873 * When this gets called from do_initcalls via cpucache_init(), 874 * init_workqueues() has already run, so keventd will be setup 875 * at that time. 876 */ 877 if (keventd_up() && reap_work->func == NULL) { 878 init_reap_node(cpu); 879 INIT_WORK(reap_work, cache_reap, NULL); 880 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 881 } 882} 883 884static struct array_cache *alloc_arraycache(int node, int entries, 885 int batchcount) 886{ 887 int memsize = sizeof(void *) * entries + sizeof(struct array_cache); 888 struct array_cache *nc = NULL; 889 890 nc = kmalloc_node(memsize, GFP_KERNEL, node); 891 if (nc) { 892 nc->avail = 0; 893 nc->limit = entries; 894 nc->batchcount = batchcount; 895 nc->touched = 0; 896 spin_lock_init(&nc->lock); 897 } 898 return nc; 899} 900 901/* 902 * Transfer objects in one arraycache to another. 903 * Locking must be handled by the caller. 904 * 905 * Return the number of entries transferred. 
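 * For example, if @from holds 12 objects, @to has limit 120 with 100
 * already in use, and @max is 30, then min(min(12, 30), 120 - 100) = 12
 * objects are copied over and @to is marked touched.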
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

#ifdef CONFIG_NUMA
static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				/* unwind: free the alien caches allocated so far */
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
989 */ 990static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 991{ 992 int node = __get_cpu_var(reap_node); 993 994 if (l3->alien) { 995 struct array_cache *ac = l3->alien[node]; 996 997 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { 998 __drain_alien_cache(cachep, ac, node); 999 spin_unlock_irq(&ac->lock); 1000 } 1001 } 1002} 1003 1004static void drain_alien_cache(struct kmem_cache *cachep, 1005 struct array_cache **alien) 1006{ 1007 int i = 0; 1008 struct array_cache *ac; 1009 unsigned long flags; 1010 1011 for_each_online_node(i) { 1012 ac = alien[i]; 1013 if (ac) { 1014 spin_lock_irqsave(&ac->lock, flags); 1015 __drain_alien_cache(cachep, ac, i); 1016 spin_unlock_irqrestore(&ac->lock, flags); 1017 } 1018 } 1019} 1020#else 1021 1022#define drain_alien_cache(cachep, alien) do { } while (0) 1023#define reap_alien(cachep, l3) do { } while (0) 1024 1025static inline struct array_cache **alloc_alien_cache(int node, int limit) 1026{ 1027 return (struct array_cache **) 0x01020304ul; 1028} 1029 1030static inline void free_alien_cache(struct array_cache **ac_ptr) 1031{ 1032} 1033 1034#endif 1035 1036static int __devinit cpuup_callback(struct notifier_block *nfb, 1037 unsigned long action, void *hcpu) 1038{ 1039 long cpu = (long)hcpu; 1040 struct kmem_cache *cachep; 1041 struct kmem_list3 *l3 = NULL; 1042 int node = cpu_to_node(cpu); 1043 int memsize = sizeof(struct kmem_list3); 1044 1045 switch (action) { 1046 case CPU_UP_PREPARE: 1047 mutex_lock(&cache_chain_mutex); 1048 /* 1049 * We need to do this right in the beginning since 1050 * alloc_arraycache's are going to use this list. 1051 * kmalloc_node allows us to add the slab to the right 1052 * kmem_list3 and not this cpu's kmem_list3 1053 */ 1054 1055 list_for_each_entry(cachep, &cache_chain, next) { 1056 /* 1057 * Set up the size64 kmemlist for cpu before we can 1058 * begin anything. Make sure some other cpu on this 1059 * node has not already allocated this 1060 */ 1061 if (!cachep->nodelists[node]) { 1062 l3 = kmalloc_node(memsize, GFP_KERNEL, node); 1063 if (!l3) 1064 goto bad; 1065 kmem_list3_init(l3); 1066 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1067 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1068 1069 /* 1070 * The l3s don't come and go as CPUs come and 1071 * go. cache_chain_mutex is sufficient 1072 * protection here. 
1073 */ 1074 cachep->nodelists[node] = l3; 1075 } 1076 1077 spin_lock_irq(&cachep->nodelists[node]->list_lock); 1078 cachep->nodelists[node]->free_limit = 1079 (1 + nr_cpus_node(node)) * 1080 cachep->batchcount + cachep->num; 1081 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 1082 } 1083 1084 /* 1085 * Now we can go ahead with allocating the shared arrays and 1086 * array caches 1087 */ 1088 list_for_each_entry(cachep, &cache_chain, next) { 1089 struct array_cache *nc; 1090 struct array_cache *shared; 1091 struct array_cache **alien; 1092 1093 nc = alloc_arraycache(node, cachep->limit, 1094 cachep->batchcount); 1095 if (!nc) 1096 goto bad; 1097 shared = alloc_arraycache(node, 1098 cachep->shared * cachep->batchcount, 1099 0xbaadf00d); 1100 if (!shared) 1101 goto bad; 1102 1103 alien = alloc_alien_cache(node, cachep->limit); 1104 if (!alien) 1105 goto bad; 1106 cachep->array[cpu] = nc; 1107 l3 = cachep->nodelists[node]; 1108 BUG_ON(!l3); 1109 1110 spin_lock_irq(&l3->list_lock); 1111 if (!l3->shared) { 1112 /* 1113 * We are serialised from CPU_DEAD or 1114 * CPU_UP_CANCELLED by the cpucontrol lock 1115 */ 1116 l3->shared = shared; 1117 shared = NULL; 1118 } 1119#ifdef CONFIG_NUMA 1120 if (!l3->alien) { 1121 l3->alien = alien; 1122 alien = NULL; 1123 } 1124#endif 1125 spin_unlock_irq(&l3->list_lock); 1126 kfree(shared); 1127 free_alien_cache(alien); 1128 } 1129 mutex_unlock(&cache_chain_mutex); 1130 break; 1131 case CPU_ONLINE: 1132 start_cpu_timer(cpu); 1133 break; 1134#ifdef CONFIG_HOTPLUG_CPU 1135 case CPU_DEAD: 1136 /* 1137 * Even if all the cpus of a node are down, we don't free the 1138 * kmem_list3 of any cache. This to avoid a race between 1139 * cpu_down, and a kmalloc allocation from another cpu for 1140 * memory from the node of the cpu going down. The list3 1141 * structure is usually allocated from kmem_cache_create() and 1142 * gets destroyed at kmem_cache_destroy(). 1143 */ 1144 /* fall thru */ 1145 case CPU_UP_CANCELED: 1146 mutex_lock(&cache_chain_mutex); 1147 list_for_each_entry(cachep, &cache_chain, next) { 1148 struct array_cache *nc; 1149 struct array_cache *shared; 1150 struct array_cache **alien; 1151 cpumask_t mask; 1152 1153 mask = node_to_cpumask(node); 1154 /* cpu is dead; no one can alloc from it. */ 1155 nc = cachep->array[cpu]; 1156 cachep->array[cpu] = NULL; 1157 l3 = cachep->nodelists[node]; 1158 1159 if (!l3) 1160 goto free_array_cache; 1161 1162 spin_lock_irq(&l3->list_lock); 1163 1164 /* Free limit for this kmem_list3 */ 1165 l3->free_limit -= cachep->batchcount; 1166 if (nc) 1167 free_block(cachep, nc->entry, nc->avail, node); 1168 1169 if (!cpus_empty(mask)) { 1170 spin_unlock_irq(&l3->list_lock); 1171 goto free_array_cache; 1172 } 1173 1174 shared = l3->shared; 1175 if (shared) { 1176 free_block(cachep, l3->shared->entry, 1177 l3->shared->avail, node); 1178 l3->shared = NULL; 1179 } 1180 1181 alien = l3->alien; 1182 l3->alien = NULL; 1183 1184 spin_unlock_irq(&l3->list_lock); 1185 1186 kfree(shared); 1187 if (alien) { 1188 drain_alien_cache(cachep, alien); 1189 free_alien_cache(alien); 1190 } 1191free_array_cache: 1192 kfree(nc); 1193 } 1194 /* 1195 * In the previous loop, all the objects were freed to 1196 * the respective cache's slabs, now we can go ahead and 1197 * shrink each nodelist to its limit. 
1198 */ 1199 list_for_each_entry(cachep, &cache_chain, next) { 1200 l3 = cachep->nodelists[node]; 1201 if (!l3) 1202 continue; 1203 spin_lock_irq(&l3->list_lock); 1204 /* free slabs belonging to this node */ 1205 __node_shrink(cachep, node); 1206 spin_unlock_irq(&l3->list_lock); 1207 } 1208 mutex_unlock(&cache_chain_mutex); 1209 break; 1210#endif 1211 } 1212 return NOTIFY_OK; 1213bad: 1214 mutex_unlock(&cache_chain_mutex); 1215 return NOTIFY_BAD; 1216} 1217 1218static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 }; 1219 1220/* 1221 * swap the static kmem_list3 with kmalloced memory 1222 */ 1223static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, 1224 int nodeid) 1225{ 1226 struct kmem_list3 *ptr; 1227 1228 BUG_ON(cachep->nodelists[nodeid] != list); 1229 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); 1230 BUG_ON(!ptr); 1231 1232 local_irq_disable(); 1233 memcpy(ptr, list, sizeof(struct kmem_list3)); 1234 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1235 cachep->nodelists[nodeid] = ptr; 1236 local_irq_enable(); 1237} 1238 1239/* 1240 * Initialisation. Called after the page allocator have been initialised and 1241 * before smp_init(). 1242 */ 1243void __init kmem_cache_init(void) 1244{ 1245 size_t left_over; 1246 struct cache_sizes *sizes; 1247 struct cache_names *names; 1248 int i; 1249 int order; 1250 1251 for (i = 0; i < NUM_INIT_LISTS; i++) { 1252 kmem_list3_init(&initkmem_list3[i]); 1253 if (i < MAX_NUMNODES) 1254 cache_cache.nodelists[i] = NULL; 1255 } 1256 1257 /* 1258 * Fragmentation resistance on low memory - only use bigger 1259 * page orders on machines with more than 32MB of memory. 1260 */ 1261 if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1262 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1263 1264 /* Bootstrap is tricky, because several objects are allocated 1265 * from caches that do not exist yet: 1266 * 1) initialize the cache_cache cache: it contains the struct 1267 * kmem_cache structures of all caches, except cache_cache itself: 1268 * cache_cache is statically allocated. 1269 * Initially an __init data area is used for the head array and the 1270 * kmem_list3 structures, it's replaced with a kmalloc allocated 1271 * array at the end of the bootstrap. 1272 * 2) Create the first kmalloc cache. 1273 * The struct kmem_cache for the new cache is allocated normally. 1274 * An __init data area is used for the head array. 1275 * 3) Create the remaining kmalloc caches, with minimally sized 1276 * head arrays. 1277 * 4) Replace the __init data head arrays for cache_cache and the first 1278 * kmalloc cache with kmalloc allocated arrays. 1279 * 5) Replace the __init data for kmem_list3 for cache_cache and 1280 * the other cache's with kmalloc allocated memory. 1281 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 
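	 *
	 * The static __initdata used by steps 1)-3) lives in initkmem_list3[]:
	 * index CACHE_CACHE for cache_cache, indices SIZE_AC + node for the
	 * cache backing struct arraycache_init, and SIZE_L3 + node for the one
	 * backing struct kmem_list3; step 5) swaps each of them for kmalloc'ed
	 * memory via init_list().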
1282 */ 1283 1284 /* 1) create the cache_cache */ 1285 INIT_LIST_HEAD(&cache_chain); 1286 list_add(&cache_cache.next, &cache_chain); 1287 cache_cache.colour_off = cache_line_size(); 1288 cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1289 cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE]; 1290 1291 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1292 cache_line_size()); 1293 1294 for (order = 0; order < MAX_ORDER; order++) { 1295 cache_estimate(order, cache_cache.buffer_size, 1296 cache_line_size(), 0, &left_over, &cache_cache.num); 1297 if (cache_cache.num) 1298 break; 1299 } 1300 if (!cache_cache.num) 1301 BUG(); 1302 cache_cache.gfporder = order; 1303 cache_cache.colour = left_over / cache_cache.colour_off; 1304 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1305 sizeof(struct slab), cache_line_size()); 1306 1307 /* 2+3) create the kmalloc caches */ 1308 sizes = malloc_sizes; 1309 names = cache_names; 1310 1311 /* 1312 * Initialize the caches that provide memory for the array cache and the 1313 * kmem_list3 structures first. Without this, further allocations will 1314 * bug. 1315 */ 1316 1317 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, 1318 sizes[INDEX_AC].cs_size, 1319 ARCH_KMALLOC_MINALIGN, 1320 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1321 NULL, NULL); 1322 1323 if (INDEX_AC != INDEX_L3) { 1324 sizes[INDEX_L3].cs_cachep = 1325 kmem_cache_create(names[INDEX_L3].name, 1326 sizes[INDEX_L3].cs_size, 1327 ARCH_KMALLOC_MINALIGN, 1328 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1329 NULL, NULL); 1330 } 1331 1332 while (sizes->cs_size != ULONG_MAX) { 1333 /* 1334 * For performance, all the general caches are L1 aligned. 1335 * This should be particularly beneficial on SMP boxes, as it 1336 * eliminates "false sharing". 1337 * Note for systems short on memory removing the alignment will 1338 * allow tighter packing of the smaller caches. 1339 */ 1340 if (!sizes->cs_cachep) { 1341 sizes->cs_cachep = kmem_cache_create(names->name, 1342 sizes->cs_size, 1343 ARCH_KMALLOC_MINALIGN, 1344 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1345 NULL, NULL); 1346 } 1347 1348 /* Inc off-slab bufctl limit until the ceiling is hit. 
*/ 1349 if (!(OFF_SLAB(sizes->cs_cachep))) { 1350 offslab_limit = sizes->cs_size - sizeof(struct slab); 1351 offslab_limit /= sizeof(kmem_bufctl_t); 1352 } 1353 1354 sizes->cs_dmacachep = kmem_cache_create(names->name_dma, 1355 sizes->cs_size, 1356 ARCH_KMALLOC_MINALIGN, 1357 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1358 SLAB_PANIC, 1359 NULL, NULL); 1360 sizes++; 1361 names++; 1362 } 1363 /* 4) Replace the bootstrap head arrays */ 1364 { 1365 void *ptr; 1366 1367 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1368 1369 local_irq_disable(); 1370 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1371 memcpy(ptr, cpu_cache_get(&cache_cache), 1372 sizeof(struct arraycache_init)); 1373 cache_cache.array[smp_processor_id()] = ptr; 1374 local_irq_enable(); 1375 1376 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1377 1378 local_irq_disable(); 1379 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1380 != &initarray_generic.cache); 1381 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1382 sizeof(struct arraycache_init)); 1383 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1384 ptr; 1385 local_irq_enable(); 1386 } 1387 /* 5) Replace the bootstrap kmem_list3's */ 1388 { 1389 int node; 1390 /* Replace the static kmem_list3 structures for the boot cpu */ 1391 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], 1392 numa_node_id()); 1393 1394 for_each_online_node(node) { 1395 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1396 &initkmem_list3[SIZE_AC + node], node); 1397 1398 if (INDEX_AC != INDEX_L3) { 1399 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1400 &initkmem_list3[SIZE_L3 + node], 1401 node); 1402 } 1403 } 1404 } 1405 1406 /* 6) resize the head arrays to their final sizes */ 1407 { 1408 struct kmem_cache *cachep; 1409 mutex_lock(&cache_chain_mutex); 1410 list_for_each_entry(cachep, &cache_chain, next) 1411 enable_cpucache(cachep); 1412 mutex_unlock(&cache_chain_mutex); 1413 } 1414 1415 /* Done! */ 1416 g_cpucache_up = FULL; 1417 1418 /* 1419 * Register a cpu startup notifier callback that initializes 1420 * cpu_cache_get for all new cpus 1421 */ 1422 register_cpu_notifier(&cpucache_notifier); 1423 1424 /* 1425 * The reap timers are started later, with a module init call: That part 1426 * of the kernel is not yet operational. 1427 */ 1428} 1429 1430static int __init cpucache_init(void) 1431{ 1432 int cpu; 1433 1434 /* 1435 * Register the timers that return unneeded pages to the page allocator 1436 */ 1437 for_each_online_cpu(cpu) 1438 start_cpu_timer(cpu); 1439 return 0; 1440} 1441__initcall(cpucache_init); 1442 1443/* 1444 * Interface to system's page allocator. No need to hold the cache-lock. 1445 * 1446 * If we requested dmaable memory, we will get it. Even if we 1447 * did not request dmaable memory, we might get it, but that 1448 * would be relatively rare and ignorable. 1449 */ 1450static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) 1451{ 1452 struct page *page; 1453 void *addr; 1454 int i; 1455 1456 flags |= cachep->gfpflags; 1457 page = alloc_pages_node(nodeid, flags, cachep->gfporder); 1458 if (!page) 1459 return NULL; 1460 addr = page_address(page); 1461 1462 i = (1 << cachep->gfporder); 1463 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1464 atomic_add(i, &slab_reclaim_pages); 1465 add_page_state(nr_slab, i); 1466 while (i--) { 1467 __SetPageSlab(page); 1468 page++; 1469 } 1470 return addr; 1471} 1472 1473/* 1474 * Interface to system's page release. 
1475 */ 1476static void kmem_freepages(struct kmem_cache *cachep, void *addr) 1477{ 1478 unsigned long i = (1 << cachep->gfporder); 1479 struct page *page = virt_to_page(addr); 1480 const unsigned long nr_freed = i; 1481 1482 while (i--) { 1483 BUG_ON(!PageSlab(page)); 1484 __ClearPageSlab(page); 1485 page++; 1486 } 1487 sub_page_state(nr_slab, nr_freed); 1488 if (current->reclaim_state) 1489 current->reclaim_state->reclaimed_slab += nr_freed; 1490 free_pages((unsigned long)addr, cachep->gfporder); 1491 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1492 atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); 1493} 1494 1495static void kmem_rcu_free(struct rcu_head *head) 1496{ 1497 struct slab_rcu *slab_rcu = (struct slab_rcu *)head; 1498 struct kmem_cache *cachep = slab_rcu->cachep; 1499 1500 kmem_freepages(cachep, slab_rcu->addr); 1501 if (OFF_SLAB(cachep)) 1502 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1503} 1504 1505#if DEBUG 1506 1507#ifdef CONFIG_DEBUG_PAGEALLOC 1508static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1509 unsigned long caller) 1510{ 1511 int size = obj_size(cachep); 1512 1513 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1514 1515 if (size < 5 * sizeof(unsigned long)) 1516 return; 1517 1518 *addr++ = 0x12345678; 1519 *addr++ = caller; 1520 *addr++ = smp_processor_id(); 1521 size -= 3 * sizeof(unsigned long); 1522 { 1523 unsigned long *sptr = &caller; 1524 unsigned long svalue; 1525 1526 while (!kstack_end(sptr)) { 1527 svalue = *sptr++; 1528 if (kernel_text_address(svalue)) { 1529 *addr++ = svalue; 1530 size -= sizeof(unsigned long); 1531 if (size <= sizeof(unsigned long)) 1532 break; 1533 } 1534 } 1535 1536 } 1537 *addr++ = 0x87654321; 1538} 1539#endif 1540 1541static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1542{ 1543 int size = obj_size(cachep); 1544 addr = &((char *)addr)[obj_offset(cachep)]; 1545 1546 memset(addr, val, size); 1547 *(unsigned char *)(addr + size - 1) = POISON_END; 1548} 1549 1550static void dump_line(char *data, int offset, int limit) 1551{ 1552 int i; 1553 printk(KERN_ERR "%03x:", offset); 1554 for (i = 0; i < limit; i++) 1555 printk(" %02x", (unsigned char)data[offset + i]); 1556 printk("\n"); 1557} 1558#endif 1559 1560#if DEBUG 1561 1562static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1563{ 1564 int i, size; 1565 char *realobj; 1566 1567 if (cachep->flags & SLAB_RED_ZONE) { 1568 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", 1569 *dbg_redzone1(cachep, objp), 1570 *dbg_redzone2(cachep, objp)); 1571 } 1572 1573 if (cachep->flags & SLAB_STORE_USER) { 1574 printk(KERN_ERR "Last user: [<%p>]", 1575 *dbg_userword(cachep, objp)); 1576 print_symbol("(%s)", 1577 (unsigned long)*dbg_userword(cachep, objp)); 1578 printk("\n"); 1579 } 1580 realobj = (char *)objp + obj_offset(cachep); 1581 size = obj_size(cachep); 1582 for (i = 0; i < size && lines; i += 16, lines--) { 1583 int limit; 1584 limit = 16; 1585 if (i + limit > size) 1586 limit = size - i; 1587 dump_line(realobj, i, limit); 1588 } 1589} 1590 1591static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1592{ 1593 char *realobj; 1594 int size, i; 1595 int lines = 0; 1596 1597 realobj = (char *)objp + obj_offset(cachep); 1598 size = obj_size(cachep); 1599 1600 for (i = 0; i < size; i++) { 1601 char exp = POISON_FREE; 1602 if (i == size - 1) 1603 exp = POISON_END; 1604 if (realobj[i] != exp) { 1605 int limit; 1606 /* Mismatch ! 
*/ 1607 /* Print header */ 1608 if (lines == 0) { 1609 printk(KERN_ERR 1610 "Slab corruption: start=%p, len=%d\n", 1611 realobj, size); 1612 print_objinfo(cachep, objp, 0); 1613 } 1614 /* Hexdump the affected line */ 1615 i = (i / 16) * 16; 1616 limit = 16; 1617 if (i + limit > size) 1618 limit = size - i; 1619 dump_line(realobj, i, limit); 1620 i += 16; 1621 lines++; 1622 /* Limit to 5 lines */ 1623 if (lines > 5) 1624 break; 1625 } 1626 } 1627 if (lines != 0) { 1628 /* Print some data about the neighboring objects, if they 1629 * exist: 1630 */ 1631 struct slab *slabp = virt_to_slab(objp); 1632 unsigned int objnr; 1633 1634 objnr = obj_to_index(cachep, slabp, objp); 1635 if (objnr) { 1636 objp = index_to_obj(cachep, slabp, objnr - 1); 1637 realobj = (char *)objp + obj_offset(cachep); 1638 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1639 realobj, size); 1640 print_objinfo(cachep, objp, 2); 1641 } 1642 if (objnr + 1 < cachep->num) { 1643 objp = index_to_obj(cachep, slabp, objnr + 1); 1644 realobj = (char *)objp + obj_offset(cachep); 1645 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1646 realobj, size); 1647 print_objinfo(cachep, objp, 2); 1648 } 1649 } 1650} 1651#endif 1652 1653#if DEBUG 1654/** 1655 * slab_destroy_objs - destroy a slab and its objects 1656 * @cachep: cache pointer being destroyed 1657 * @slabp: slab pointer being destroyed 1658 * 1659 * Call the registered destructor for each object in a slab that is being 1660 * destroyed. 1661 */ 1662static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1663{ 1664 int i; 1665 for (i = 0; i < cachep->num; i++) { 1666 void *objp = index_to_obj(cachep, slabp, i); 1667 1668 if (cachep->flags & SLAB_POISON) { 1669#ifdef CONFIG_DEBUG_PAGEALLOC 1670 if (cachep->buffer_size % PAGE_SIZE == 0 && 1671 OFF_SLAB(cachep)) 1672 kernel_map_pages(virt_to_page(objp), 1673 cachep->buffer_size / PAGE_SIZE, 1); 1674 else 1675 check_poison_obj(cachep, objp); 1676#else 1677 check_poison_obj(cachep, objp); 1678#endif 1679 } 1680 if (cachep->flags & SLAB_RED_ZONE) { 1681 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1682 slab_error(cachep, "start of a freed object " 1683 "was overwritten"); 1684 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1685 slab_error(cachep, "end of a freed object " 1686 "was overwritten"); 1687 } 1688 if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1689 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); 1690 } 1691} 1692#else 1693static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1694{ 1695 if (cachep->dtor) { 1696 int i; 1697 for (i = 0; i < cachep->num; i++) { 1698 void *objp = index_to_obj(cachep, slabp, i); 1699 (cachep->dtor) (objp, cachep, 0); 1700 } 1701 } 1702} 1703#endif 1704 1705/** 1706 * slab_destroy - destroy and release all objects in a slab 1707 * @cachep: cache pointer being destroyed 1708 * @slabp: slab pointer being destroyed 1709 * 1710 * Destroy all the objs in a slab, and release the mem back to the system. 1711 * Before calling the slab must have been unlinked from the cache. The 1712 * cache-lock is not held/needed. 
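 *
 * For SLAB_DESTROY_BY_RCU caches the page release is deferred: the slab
 * management structure is reused as a struct slab_rcu and handed to
 * call_rcu(), so kmem_freepages() only runs after a grace period.
 * Otherwise the pages are returned immediately.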
1713 */ 1714static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) 1715{ 1716 void *addr = slabp->s_mem - slabp->colouroff; 1717 1718 slab_destroy_objs(cachep, slabp); 1719 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1720 struct slab_rcu *slab_rcu; 1721 1722 slab_rcu = (struct slab_rcu *)slabp; 1723 slab_rcu->cachep = cachep; 1724 slab_rcu->addr = addr; 1725 call_rcu(&slab_rcu->head, kmem_rcu_free); 1726 } else { 1727 kmem_freepages(cachep, addr); 1728 if (OFF_SLAB(cachep)) 1729 kmem_cache_free(cachep->slabp_cache, slabp); 1730 } 1731} 1732 1733/* 1734 * For setting up all the kmem_list3s for cache whose buffer_size is same as 1735 * size of kmem_list3. 1736 */ 1737static void set_up_list3s(struct kmem_cache *cachep, int index) 1738{ 1739 int node; 1740 1741 for_each_online_node(node) { 1742 cachep->nodelists[node] = &initkmem_list3[index + node]; 1743 cachep->nodelists[node]->next_reap = jiffies + 1744 REAPTIMEOUT_LIST3 + 1745 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1746 } 1747} 1748 1749/** 1750 * calculate_slab_order - calculate size (page order) of slabs 1751 * @cachep: pointer to the cache that is being created 1752 * @size: size of objects to be created in this cache. 1753 * @align: required alignment for the objects. 1754 * @flags: slab allocation flags 1755 * 1756 * Also calculates the number of objects per slab. 1757 * 1758 * This could be made much more intelligent. For now, try to avoid using 1759 * high order pages for slabs. When the gfp() functions are more friendly 1760 * towards high-order requests, this should be changed. 1761 */ 1762static size_t calculate_slab_order(struct kmem_cache *cachep, 1763 size_t size, size_t align, unsigned long flags) 1764{ 1765 size_t left_over = 0; 1766 int gfporder; 1767 1768 for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { 1769 unsigned int num; 1770 size_t remainder; 1771 1772 cache_estimate(gfporder, size, align, flags, &remainder, &num); 1773 if (!num) 1774 continue; 1775 1776 /* More than offslab_limit objects will cause problems */ 1777 if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit) 1778 break; 1779 1780 /* Found something acceptable - save it away */ 1781 cachep->num = num; 1782 cachep->gfporder = gfporder; 1783 left_over = remainder; 1784 1785 /* 1786 * A VFS-reclaimable slab tends to have most allocations 1787 * as GFP_NOFS and we really don't want to have to be allocating 1788 * higher-order pages when we are unable to shrink dcache. 1789 */ 1790 if (flags & SLAB_RECLAIM_ACCOUNT) 1791 break; 1792 1793 /* 1794 * Large number of objects is good, but very large slabs are 1795 * currently bad for the gfp()s. 1796 */ 1797 if (gfporder >= slab_break_gfp_order) 1798 break; 1799 1800 /* 1801 * Acceptable internal fragmentation? 1802 */ 1803 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 1804 break; 1805 } 1806 return left_over; 1807} 1808 1809static void setup_cpu_cache(struct kmem_cache *cachep) 1810{ 1811 if (g_cpucache_up == FULL) { 1812 enable_cpucache(cachep); 1813 return; 1814 } 1815 if (g_cpucache_up == NONE) { 1816 /* 1817 * Note: the first kmem_cache_create must create the cache 1818 * that's used by kmalloc(24), otherwise the creation of 1819 * further caches will BUG(). 1820 */ 1821 cachep->array[smp_processor_id()] = &initarray_generic.cache; 1822 1823 /* 1824 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 1825 * the first cache, then we need to set up all its list3s, 1826 * otherwise the creation of further caches will BUG(). 
1827 */ 1828 set_up_list3s(cachep, SIZE_AC); 1829 if (INDEX_AC == INDEX_L3) 1830 g_cpucache_up = PARTIAL_L3; 1831 else 1832 g_cpucache_up = PARTIAL_AC; 1833 } else { 1834 cachep->array[smp_processor_id()] = 1835 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1836 1837 if (g_cpucache_up == PARTIAL_AC) { 1838 set_up_list3s(cachep, SIZE_L3); 1839 g_cpucache_up = PARTIAL_L3; 1840 } else { 1841 int node; 1842 for_each_online_node(node) { 1843 cachep->nodelists[node] = 1844 kmalloc_node(sizeof(struct kmem_list3), 1845 GFP_KERNEL, node); 1846 BUG_ON(!cachep->nodelists[node]); 1847 kmem_list3_init(cachep->nodelists[node]); 1848 } 1849 } 1850 } 1851 cachep->nodelists[numa_node_id()]->next_reap = 1852 jiffies + REAPTIMEOUT_LIST3 + 1853 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1854 1855 cpu_cache_get(cachep)->avail = 0; 1856 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 1857 cpu_cache_get(cachep)->batchcount = 1; 1858 cpu_cache_get(cachep)->touched = 0; 1859 cachep->batchcount = 1; 1860 cachep->limit = BOOT_CPUCACHE_ENTRIES; 1861} 1862 1863/** 1864 * kmem_cache_create - Create a cache. 1865 * @name: A string which is used in /proc/slabinfo to identify this cache. 1866 * @size: The size of objects to be created in this cache. 1867 * @align: The required alignment for the objects. 1868 * @flags: SLAB flags 1869 * @ctor: A constructor for the objects. 1870 * @dtor: A destructor for the objects. 1871 * 1872 * Returns a ptr to the cache on success, NULL on failure. 1873 * Cannot be called within a int, but can be interrupted. 1874 * The @ctor is run when new pages are allocated by the cache 1875 * and the @dtor is run before the pages are handed back. 1876 * 1877 * @name must be valid until the cache is destroyed. This implies that 1878 * the module calling this has to destroy the cache before getting unloaded. 1879 * 1880 * The flags are 1881 * 1882 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 1883 * to catch references to uninitialised memory. 1884 * 1885 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 1886 * for buffer overruns. 1887 * 1888 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 1889 * cacheline. This can be beneficial if you're counting cycles as closely 1890 * as davem. 1891 */ 1892struct kmem_cache * 1893kmem_cache_create (const char *name, size_t size, size_t align, 1894 unsigned long flags, 1895 void (*ctor)(void*, struct kmem_cache *, unsigned long), 1896 void (*dtor)(void*, struct kmem_cache *, unsigned long)) 1897{ 1898 size_t left_over, slab_size, ralign; 1899 struct kmem_cache *cachep = NULL; 1900 struct list_head *p; 1901 1902 /* 1903 * Sanity checks... these are all serious usage bugs. 1904 */ 1905 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 1906 (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { 1907 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 1908 name); 1909 BUG(); 1910 } 1911 1912 /* 1913 * Prevent CPUs from coming and going. 1914 * lock_cpu_hotplug() nests outside cache_chain_mutex 1915 */ 1916 lock_cpu_hotplug(); 1917 1918 mutex_lock(&cache_chain_mutex); 1919 1920 list_for_each(p, &cache_chain) { 1921 struct kmem_cache *pc = list_entry(p, struct kmem_cache, next); 1922 mm_segment_t old_fs = get_fs(); 1923 char tmp; 1924 int res; 1925 1926 /* 1927 * This happens when the module gets unloaded and doesn't 1928 * destroy its slab cache and no-one else reuses the vmalloc 1929 * area of the module. Print a warning. 
1930 */ 1931 set_fs(KERNEL_DS); 1932 res = __get_user(tmp, pc->name); 1933 set_fs(old_fs); 1934 if (res) { 1935 printk("SLAB: cache with size %d has lost its name\n", 1936 pc->buffer_size); 1937 continue; 1938 } 1939 1940 if (!strcmp(pc->name, name)) { 1941 printk("kmem_cache_create: duplicate cache %s\n", name); 1942 dump_stack(); 1943 goto oops; 1944 } 1945 } 1946 1947#if DEBUG 1948 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 1949 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { 1950 /* No constructor, but inital state check requested */ 1951 printk(KERN_ERR "%s: No con, but init state check " 1952 "requested - %s\n", __FUNCTION__, name); 1953 flags &= ~SLAB_DEBUG_INITIAL; 1954 } 1955#if FORCED_DEBUG 1956 /* 1957 * Enable redzoning and last user accounting, except for caches with 1958 * large objects, if the increased size would increase the object size 1959 * above the next power of two: caches with object sizes just above a 1960 * power of two have a significant amount of internal fragmentation. 1961 */ 1962 if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD)) 1963 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 1964 if (!(flags & SLAB_DESTROY_BY_RCU)) 1965 flags |= SLAB_POISON; 1966#endif 1967 if (flags & SLAB_DESTROY_BY_RCU) 1968 BUG_ON(flags & SLAB_POISON); 1969#endif 1970 if (flags & SLAB_DESTROY_BY_RCU) 1971 BUG_ON(dtor); 1972 1973 /* 1974 * Always checks flags, a caller might be expecting debug support which 1975 * isn't available. 1976 */ 1977 if (flags & ~CREATE_MASK) 1978 BUG(); 1979 1980 /* 1981 * Check that size is in terms of words. This is needed to avoid 1982 * unaligned accesses for some archs when redzoning is used, and makes 1983 * sure any on-slab bufctl's are also correctly aligned. 1984 */ 1985 if (size & (BYTES_PER_WORD - 1)) { 1986 size += (BYTES_PER_WORD - 1); 1987 size &= ~(BYTES_PER_WORD - 1); 1988 } 1989 1990 /* calculate the final buffer alignment: */ 1991 1992 /* 1) arch recommendation: can be overridden for debug */ 1993 if (flags & SLAB_HWCACHE_ALIGN) { 1994 /* 1995 * Default alignment: as specified by the arch code. Except if 1996 * an object is really small, then squeeze multiple objects into 1997 * one cacheline. 1998 */ 1999 ralign = cache_line_size(); 2000 while (size <= ralign / 2) 2001 ralign /= 2; 2002 } else { 2003 ralign = BYTES_PER_WORD; 2004 } 2005 /* 2) arch mandated alignment: disables debug if necessary */ 2006 if (ralign < ARCH_SLAB_MINALIGN) { 2007 ralign = ARCH_SLAB_MINALIGN; 2008 if (ralign > BYTES_PER_WORD) 2009 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2010 } 2011 /* 3) caller mandated alignment: disables debug if necessary */ 2012 if (ralign < align) { 2013 ralign = align; 2014 if (ralign > BYTES_PER_WORD) 2015 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2016 } 2017 /* 2018 * 4) Store it. Note that the debug code below can reduce 2019 * the alignment to BYTES_PER_WORD. 2020 */ 2021 align = ralign; 2022 2023 /* Get cache's description obj. */ 2024 cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL); 2025 if (!cachep) 2026 goto oops; 2027 2028#if DEBUG 2029 cachep->obj_size = size; 2030 2031 if (flags & SLAB_RED_ZONE) { 2032 /* redzoning only works with word aligned caches */ 2033 align = BYTES_PER_WORD; 2034 2035 /* add space for red zone words */ 2036 cachep->obj_offset += BYTES_PER_WORD; 2037 size += 2 * BYTES_PER_WORD; 2038 } 2039 if (flags & SLAB_STORE_USER) { 2040 /* user store requires word alignment and 2041 * one word storage behind the end of the real 2042 * object. 
2043 */ 2044 align = BYTES_PER_WORD; 2045 size += BYTES_PER_WORD; 2046 } 2047#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2048 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2049 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2050 cachep->obj_offset += PAGE_SIZE - size; 2051 size = PAGE_SIZE; 2052 } 2053#endif 2054#endif 2055 2056 /* Determine if the slab management is 'on' or 'off' slab. */ 2057 if (size >= (PAGE_SIZE >> 3)) 2058 /* 2059 * Size is large, assume best to place the slab management obj 2060 * off-slab (should allow better packing of objs). 2061 */ 2062 flags |= CFLGS_OFF_SLAB; 2063 2064 size = ALIGN(size, align); 2065 2066 left_over = calculate_slab_order(cachep, size, align, flags); 2067 2068 if (!cachep->num) { 2069 printk("kmem_cache_create: couldn't create cache %s.\n", name); 2070 kmem_cache_free(&cache_cache, cachep); 2071 cachep = NULL; 2072 goto oops; 2073 } 2074 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2075 + sizeof(struct slab), align); 2076 2077 /* 2078 * If the slab has been placed off-slab, and we have enough space then 2079 * move it on-slab. This is at the expense of any extra colouring. 2080 */ 2081 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2082 flags &= ~CFLGS_OFF_SLAB; 2083 left_over -= slab_size; 2084 } 2085 2086 if (flags & CFLGS_OFF_SLAB) { 2087 /* really off slab. No need for manual alignment */ 2088 slab_size = 2089 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2090 } 2091 2092 cachep->colour_off = cache_line_size(); 2093 /* Offset must be a multiple of the alignment. */ 2094 if (cachep->colour_off < align) 2095 cachep->colour_off = align; 2096 cachep->colour = left_over / cachep->colour_off; 2097 cachep->slab_size = slab_size; 2098 cachep->flags = flags; 2099 cachep->gfpflags = 0; 2100 if (flags & SLAB_CACHE_DMA) 2101 cachep->gfpflags |= GFP_DMA; 2102 cachep->buffer_size = size; 2103 2104 if (flags & CFLGS_OFF_SLAB) 2105 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2106 cachep->ctor = ctor; 2107 cachep->dtor = dtor; 2108 cachep->name = name; 2109 2110 2111 setup_cpu_cache(cachep); 2112 2113 /* cache setup completed, link it into the list */ 2114 list_add(&cachep->next, &cache_chain); 2115oops: 2116 if (!cachep && (flags & SLAB_PANIC)) 2117 panic("kmem_cache_create(): failed to create slab `%s'\n", 2118 name); 2119 mutex_unlock(&cache_chain_mutex); 2120 unlock_cpu_hotplug(); 2121 return cachep; 2122} 2123EXPORT_SYMBOL(kmem_cache_create); 2124 2125#if DEBUG 2126static void check_irq_off(void) 2127{ 2128 BUG_ON(!irqs_disabled()); 2129} 2130 2131static void check_irq_on(void) 2132{ 2133 BUG_ON(irqs_disabled()); 2134} 2135 2136static void check_spinlock_acquired(struct kmem_cache *cachep) 2137{ 2138#ifdef CONFIG_SMP 2139 check_irq_off(); 2140 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2141#endif 2142} 2143 2144static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2145{ 2146#ifdef CONFIG_SMP 2147 check_irq_off(); 2148 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2149#endif 2150} 2151 2152#else 2153#define check_irq_off() do { } while(0) 2154#define check_irq_on() do { } while(0) 2155#define check_spinlock_acquired(x) do { } while(0) 2156#define check_spinlock_acquired_node(x, y) do { } while(0) 2157#endif 2158 2159static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2160 struct array_cache *ac, 2161 int force, int node); 2162 2163static void do_drain(void *arg) 2164{ 2165 struct kmem_cache *cachep = 
arg; 2166 struct array_cache *ac; 2167 int node = numa_node_id(); 2168 2169 check_irq_off(); 2170 ac = cpu_cache_get(cachep); 2171 spin_lock(&cachep->nodelists[node]->list_lock); 2172 free_block(cachep, ac->entry, ac->avail, node); 2173 spin_unlock(&cachep->nodelists[node]->list_lock); 2174 ac->avail = 0; 2175} 2176 2177static void drain_cpu_caches(struct kmem_cache *cachep) 2178{ 2179 struct kmem_list3 *l3; 2180 int node; 2181 2182 on_each_cpu(do_drain, cachep, 1, 1); 2183 check_irq_on(); 2184 for_each_online_node(node) { 2185 l3 = cachep->nodelists[node]; 2186 if (l3) { 2187 drain_array(cachep, l3, l3->shared, 1, node); 2188 if (l3->alien) 2189 drain_alien_cache(cachep, l3->alien); 2190 } 2191 } 2192} 2193 2194static int __node_shrink(struct kmem_cache *cachep, int node) 2195{ 2196 struct slab *slabp; 2197 struct kmem_list3 *l3 = cachep->nodelists[node]; 2198 int ret; 2199 2200 for (;;) { 2201 struct list_head *p; 2202 2203 p = l3->slabs_free.prev; 2204 if (p == &l3->slabs_free) 2205 break; 2206 2207 slabp = list_entry(l3->slabs_free.prev, struct slab, list); 2208#if DEBUG 2209 if (slabp->inuse) 2210 BUG(); 2211#endif 2212 list_del(&slabp->list); 2213 2214 l3->free_objects -= cachep->num; 2215 spin_unlock_irq(&l3->list_lock); 2216 slab_destroy(cachep, slabp); 2217 spin_lock_irq(&l3->list_lock); 2218 } 2219 ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial); 2220 return ret; 2221} 2222 2223static int __cache_shrink(struct kmem_cache *cachep) 2224{ 2225 int ret = 0, i = 0; 2226 struct kmem_list3 *l3; 2227 2228 drain_cpu_caches(cachep); 2229 2230 check_irq_on(); 2231 for_each_online_node(i) { 2232 l3 = cachep->nodelists[i]; 2233 if (l3) { 2234 spin_lock_irq(&l3->list_lock); 2235 ret += __node_shrink(cachep, i); 2236 spin_unlock_irq(&l3->list_lock); 2237 } 2238 } 2239 return (ret ? 1 : 0); 2240} 2241 2242/** 2243 * kmem_cache_shrink - Shrink a cache. 2244 * @cachep: The cache to shrink. 2245 * 2246 * Releases as many slabs as possible for a cache. 2247 * To help debugging, a zero exit status indicates all slabs were released. 2248 */ 2249int kmem_cache_shrink(struct kmem_cache *cachep) 2250{ 2251 if (!cachep || in_interrupt()) 2252 BUG(); 2253 2254 return __cache_shrink(cachep); 2255} 2256EXPORT_SYMBOL(kmem_cache_shrink); 2257 2258/** 2259 * kmem_cache_destroy - delete a cache 2260 * @cachep: the cache to destroy 2261 * 2262 * Remove a struct kmem_cache object from the slab cache. 2263 * Returns 0 on success. 2264 * 2265 * It is expected this function will be called by a module when it is 2266 * unloaded. This will remove the cache completely, and avoid a duplicate 2267 * cache being allocated each time a module is loaded and unloaded, if the 2268 * module doesn't have persistent in-kernel storage across loads and unloads. 2269 * 2270 * The cache must be empty before calling this function. 2271 * 2272 * The caller must guarantee that noone will allocate memory from the cache 2273 * during the kmem_cache_destroy(). 2274 */ 2275int kmem_cache_destroy(struct kmem_cache *cachep) 2276{ 2277 int i; 2278 struct kmem_list3 *l3; 2279 2280 if (!cachep || in_interrupt()) 2281 BUG(); 2282 2283 /* Don't let CPUs to come and go */ 2284 lock_cpu_hotplug(); 2285 2286 /* Find the cache in the chain of caches. 
*/ 2287 mutex_lock(&cache_chain_mutex); 2288 /* 2289 * the chain is never empty, cache_cache is never destroyed 2290 */ 2291 list_del(&cachep->next); 2292 mutex_unlock(&cache_chain_mutex); 2293 2294 if (__cache_shrink(cachep)) { 2295 slab_error(cachep, "Can't free all objects"); 2296 mutex_lock(&cache_chain_mutex); 2297 list_add(&cachep->next, &cache_chain); 2298 mutex_unlock(&cache_chain_mutex); 2299 unlock_cpu_hotplug(); 2300 return 1; 2301 } 2302 2303 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2304 synchronize_rcu(); 2305 2306 for_each_online_cpu(i) 2307 kfree(cachep->array[i]); 2308 2309 /* NUMA: free the list3 structures */ 2310 for_each_online_node(i) { 2311 l3 = cachep->nodelists[i]; 2312 if (l3) { 2313 kfree(l3->shared); 2314 free_alien_cache(l3->alien); 2315 kfree(l3); 2316 } 2317 } 2318 kmem_cache_free(&cache_cache, cachep); 2319 unlock_cpu_hotplug(); 2320 return 0; 2321} 2322EXPORT_SYMBOL(kmem_cache_destroy); 2323 2324/* Get the memory for a slab management obj. */ 2325static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2326 int colour_off, gfp_t local_flags) 2327{ 2328 struct slab *slabp; 2329 2330 if (OFF_SLAB(cachep)) { 2331 /* Slab management obj is off-slab. */ 2332 slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); 2333 if (!slabp) 2334 return NULL; 2335 } else { 2336 slabp = objp + colour_off; 2337 colour_off += cachep->slab_size; 2338 } 2339 slabp->inuse = 0; 2340 slabp->colouroff = colour_off; 2341 slabp->s_mem = objp + colour_off; 2342 return slabp; 2343} 2344 2345static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2346{ 2347 return (kmem_bufctl_t *) (slabp + 1); 2348} 2349 2350static void cache_init_objs(struct kmem_cache *cachep, 2351 struct slab *slabp, unsigned long ctor_flags) 2352{ 2353 int i; 2354 2355 for (i = 0; i < cachep->num; i++) { 2356 void *objp = index_to_obj(cachep, slabp, i); 2357#if DEBUG 2358 /* need to poison the objs? */ 2359 if (cachep->flags & SLAB_POISON) 2360 poison_obj(cachep, objp, POISON_FREE); 2361 if (cachep->flags & SLAB_STORE_USER) 2362 *dbg_userword(cachep, objp) = NULL; 2363 2364 if (cachep->flags & SLAB_RED_ZONE) { 2365 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2366 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2367 } 2368 /* 2369 * Constructors are not allowed to allocate memory from the same 2370 * cache which they are a constructor for. Otherwise, deadlock. 2371 * They must also be threaded. 
2372 */ 2373 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2374 cachep->ctor(objp + obj_offset(cachep), cachep, 2375 ctor_flags); 2376 2377 if (cachep->flags & SLAB_RED_ZONE) { 2378 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2379 slab_error(cachep, "constructor overwrote the" 2380 " end of an object"); 2381 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2382 slab_error(cachep, "constructor overwrote the" 2383 " start of an object"); 2384 } 2385 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2386 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2387 kernel_map_pages(virt_to_page(objp), 2388 cachep->buffer_size / PAGE_SIZE, 0); 2389#else 2390 if (cachep->ctor) 2391 cachep->ctor(objp, cachep, ctor_flags); 2392#endif 2393 slab_bufctl(slabp)[i] = i + 1; 2394 } 2395 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2396 slabp->free = 0; 2397} 2398 2399static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2400{ 2401 if (flags & SLAB_DMA) 2402 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2403 else 2404 BUG_ON(cachep->gfpflags & GFP_DMA); 2405} 2406 2407static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2408 int nodeid) 2409{ 2410 void *objp = index_to_obj(cachep, slabp, slabp->free); 2411 kmem_bufctl_t next; 2412 2413 slabp->inuse++; 2414 next = slab_bufctl(slabp)[slabp->free]; 2415#if DEBUG 2416 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2417 WARN_ON(slabp->nodeid != nodeid); 2418#endif 2419 slabp->free = next; 2420 2421 return objp; 2422} 2423 2424static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2425 void *objp, int nodeid) 2426{ 2427 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2428 2429#if DEBUG 2430 /* Verify that the slab belongs to the intended node */ 2431 WARN_ON(slabp->nodeid != nodeid); 2432 2433 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2434 printk(KERN_ERR "slab: double free detected in cache " 2435 "'%s', objp %p\n", cachep->name, objp); 2436 BUG(); 2437 } 2438#endif 2439 slab_bufctl(slabp)[objnr] = slabp->free; 2440 slabp->free = objnr; 2441 slabp->inuse--; 2442} 2443 2444static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, 2445 void *objp) 2446{ 2447 int i; 2448 struct page *page; 2449 2450 /* Nasty!!!!!! I hope this is OK. */ 2451 page = virt_to_page(objp); 2452 2453 i = 1; 2454 if (likely(!PageCompound(page))) 2455 i <<= cachep->gfporder; 2456 do { 2457 page_set_cache(page, cachep); 2458 page_set_slab(page, slabp); 2459 page++; 2460 } while (--i); 2461} 2462 2463/* 2464 * Grow (by 1) the number of slabs within a cache. This is called by 2465 * kmem_cache_alloc() when there are no active objs left in a cache. 2466 */ 2467static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) 2468{ 2469 struct slab *slabp; 2470 void *objp; 2471 size_t offset; 2472 gfp_t local_flags; 2473 unsigned long ctor_flags; 2474 struct kmem_list3 *l3; 2475 2476 /* 2477 * Be lazy and only check for valid flags here, keeping it out of the 2478 * critical path in kmem_cache_alloc(). 2479 */ 2480 if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) 2481 BUG(); 2482 if (flags & SLAB_NO_GROW) 2483 return 0; 2484 2485 ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2486 local_flags = (flags & SLAB_LEVEL_MASK); 2487 if (!(local_flags & __GFP_WAIT)) 2488 /* 2489 * Not allowed to sleep. Need to tell a constructor about 2490 * this - it might need to know... 
2491 */ 2492 ctor_flags |= SLAB_CTOR_ATOMIC; 2493 2494 /* Take the l3 list lock to change the colour_next on this node */ 2495 check_irq_off(); 2496 l3 = cachep->nodelists[nodeid]; 2497 spin_lock(&l3->list_lock); 2498 2499 /* Get colour for the slab, and cal the next value. */ 2500 offset = l3->colour_next; 2501 l3->colour_next++; 2502 if (l3->colour_next >= cachep->colour) 2503 l3->colour_next = 0; 2504 spin_unlock(&l3->list_lock); 2505 2506 offset *= cachep->colour_off; 2507 2508 if (local_flags & __GFP_WAIT) 2509 local_irq_enable(); 2510 2511 /* 2512 * The test for missing atomic flag is performed here, rather than 2513 * the more obvious place, simply to reduce the critical path length 2514 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2515 * will eventually be caught here (where it matters). 2516 */ 2517 kmem_flagcheck(cachep, flags); 2518 2519 /* 2520 * Get mem for the objs. Attempt to allocate a physical page from 2521 * 'nodeid'. 2522 */ 2523 objp = kmem_getpages(cachep, flags, nodeid); 2524 if (!objp) 2525 goto failed; 2526 2527 /* Get slab management. */ 2528 slabp = alloc_slabmgmt(cachep, objp, offset, local_flags); 2529 if (!slabp) 2530 goto opps1; 2531 2532 slabp->nodeid = nodeid; 2533 set_slab_attr(cachep, slabp, objp); 2534 2535 cache_init_objs(cachep, slabp, ctor_flags); 2536 2537 if (local_flags & __GFP_WAIT) 2538 local_irq_disable(); 2539 check_irq_off(); 2540 spin_lock(&l3->list_lock); 2541 2542 /* Make slab active. */ 2543 list_add_tail(&slabp->list, &(l3->slabs_free)); 2544 STATS_INC_GROWN(cachep); 2545 l3->free_objects += cachep->num; 2546 spin_unlock(&l3->list_lock); 2547 return 1; 2548opps1: 2549 kmem_freepages(cachep, objp); 2550failed: 2551 if (local_flags & __GFP_WAIT) 2552 local_irq_disable(); 2553 return 0; 2554} 2555 2556#if DEBUG 2557 2558/* 2559 * Perform extra freeing checks: 2560 * - detect bad pointers. 
2561 * - POISON/RED_ZONE checking 2562 * - destructor calls, for caches with POISON+dtor 2563 */ 2564static void kfree_debugcheck(const void *objp) 2565{ 2566 struct page *page; 2567 2568 if (!virt_addr_valid(objp)) { 2569 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2570 (unsigned long)objp); 2571 BUG(); 2572 } 2573 page = virt_to_page(objp); 2574 if (!PageSlab(page)) { 2575 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", 2576 (unsigned long)objp); 2577 BUG(); 2578 } 2579} 2580 2581static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2582 void *caller) 2583{ 2584 struct page *page; 2585 unsigned int objnr; 2586 struct slab *slabp; 2587 2588 objp -= obj_offset(cachep); 2589 kfree_debugcheck(objp); 2590 page = virt_to_page(objp); 2591 2592 if (page_get_cache(page) != cachep) { 2593 printk(KERN_ERR "mismatch in kmem_cache_free: expected " 2594 "cache %p, got %p\n", 2595 page_get_cache(page), cachep); 2596 printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); 2597 printk(KERN_ERR "%p is %s.\n", page_get_cache(page), 2598 page_get_cache(page)->name); 2599 WARN_ON(1); 2600 } 2601 slabp = page_get_slab(page); 2602 2603 if (cachep->flags & SLAB_RED_ZONE) { 2604 if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || 2605 *dbg_redzone2(cachep, objp) != RED_ACTIVE) { 2606 slab_error(cachep, "double free, or memory outside" 2607 " object was overwritten"); 2608 printk(KERN_ERR "%p: redzone 1:0x%lx, " 2609 "redzone 2:0x%lx.\n", 2610 objp, *dbg_redzone1(cachep, objp), 2611 *dbg_redzone2(cachep, objp)); 2612 } 2613 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2614 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2615 } 2616 if (cachep->flags & SLAB_STORE_USER) 2617 *dbg_userword(cachep, objp) = caller; 2618 2619 objnr = obj_to_index(cachep, slabp, objp); 2620 2621 BUG_ON(objnr >= cachep->num); 2622 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2623 2624 if (cachep->flags & SLAB_DEBUG_INITIAL) { 2625 /* 2626 * Need to call the slab's constructor so the caller can 2627 * perform a verify of its state (debugging). Called without 2628 * the cache-lock held. 2629 */ 2630 cachep->ctor(objp + obj_offset(cachep), 2631 cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); 2632 } 2633 if (cachep->flags & SLAB_POISON && cachep->dtor) { 2634 /* we want to cache poison the object, 2635 * call the destruction callback 2636 */ 2637 cachep->dtor(objp + obj_offset(cachep), cachep, 0); 2638 } 2639#ifdef CONFIG_DEBUG_SLAB_LEAK 2640 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2641#endif 2642 if (cachep->flags & SLAB_POISON) { 2643#ifdef CONFIG_DEBUG_PAGEALLOC 2644 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2645 store_stackinfo(cachep, objp, (unsigned long)caller); 2646 kernel_map_pages(virt_to_page(objp), 2647 cachep->buffer_size / PAGE_SIZE, 0); 2648 } else { 2649 poison_obj(cachep, objp, POISON_FREE); 2650 } 2651#else 2652 poison_obj(cachep, objp, POISON_FREE); 2653#endif 2654 } 2655 return objp; 2656} 2657 2658static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2659{ 2660 kmem_bufctl_t i; 2661 int entries = 0; 2662 2663 /* Check slab's freelist to see if this obj is there. */ 2664 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2665 entries++; 2666 if (entries > cachep->num || i >= cachep->num) 2667 goto bad; 2668 } 2669 if (entries != cachep->num - slabp->inuse) { 2670bad: 2671 printk(KERN_ERR "slab: Internal list corruption detected in " 2672 "cache '%s'(%d), slabp %p(%d). 
Hexdump:\n", 2673 cachep->name, cachep->num, slabp, slabp->inuse); 2674 for (i = 0; 2675 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2676 i++) { 2677 if (i % 16 == 0) 2678 printk("\n%03x:", i); 2679 printk(" %02x", ((unsigned char *)slabp)[i]); 2680 } 2681 printk("\n"); 2682 BUG(); 2683 } 2684} 2685#else 2686#define kfree_debugcheck(x) do { } while(0) 2687#define cache_free_debugcheck(x,objp,z) (objp) 2688#define check_slabp(x,y) do { } while(0) 2689#endif 2690 2691static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2692{ 2693 int batchcount; 2694 struct kmem_list3 *l3; 2695 struct array_cache *ac; 2696 2697 check_irq_off(); 2698 ac = cpu_cache_get(cachep); 2699retry: 2700 batchcount = ac->batchcount; 2701 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2702 /* 2703 * If there was little recent activity on this cache, then 2704 * perform only a partial refill. Otherwise we could generate 2705 * refill bouncing. 2706 */ 2707 batchcount = BATCHREFILL_LIMIT; 2708 } 2709 l3 = cachep->nodelists[numa_node_id()]; 2710 2711 BUG_ON(ac->avail > 0 || !l3); 2712 spin_lock(&l3->list_lock); 2713 2714 /* See if we can refill from the shared array */ 2715 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) 2716 goto alloc_done; 2717 2718 while (batchcount > 0) { 2719 struct list_head *entry; 2720 struct slab *slabp; 2721 /* Get slab alloc is to come from. */ 2722 entry = l3->slabs_partial.next; 2723 if (entry == &l3->slabs_partial) { 2724 l3->free_touched = 1; 2725 entry = l3->slabs_free.next; 2726 if (entry == &l3->slabs_free) 2727 goto must_grow; 2728 } 2729 2730 slabp = list_entry(entry, struct slab, list); 2731 check_slabp(cachep, slabp); 2732 check_spinlock_acquired(cachep); 2733 while (slabp->inuse < cachep->num && batchcount--) { 2734 STATS_INC_ALLOCED(cachep); 2735 STATS_INC_ACTIVE(cachep); 2736 STATS_SET_HIGH(cachep); 2737 2738 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, 2739 numa_node_id()); 2740 } 2741 check_slabp(cachep, slabp); 2742 2743 /* move slabp to correct slabp list: */ 2744 list_del(&slabp->list); 2745 if (slabp->free == BUFCTL_END) 2746 list_add(&slabp->list, &l3->slabs_full); 2747 else 2748 list_add(&slabp->list, &l3->slabs_partial); 2749 } 2750 2751must_grow: 2752 l3->free_objects -= ac->avail; 2753alloc_done: 2754 spin_unlock(&l3->list_lock); 2755 2756 if (unlikely(!ac->avail)) { 2757 int x; 2758 x = cache_grow(cachep, flags, numa_node_id()); 2759 2760 /* cache_grow can reenable interrupts, then ac could change. */ 2761 ac = cpu_cache_get(cachep); 2762 if (!x && ac->avail == 0) /* no objects in sight? abort */ 2763 return NULL; 2764 2765 if (!ac->avail) /* objects refilled by interrupt? 
*/ 2766 goto retry; 2767 } 2768 ac->touched = 1; 2769 return ac->entry[--ac->avail]; 2770} 2771 2772static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 2773 gfp_t flags) 2774{ 2775 might_sleep_if(flags & __GFP_WAIT); 2776#if DEBUG 2777 kmem_flagcheck(cachep, flags); 2778#endif 2779} 2780 2781#if DEBUG 2782static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 2783 gfp_t flags, void *objp, void *caller) 2784{ 2785 if (!objp) 2786 return objp; 2787 if (cachep->flags & SLAB_POISON) { 2788#ifdef CONFIG_DEBUG_PAGEALLOC 2789 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 2790 kernel_map_pages(virt_to_page(objp), 2791 cachep->buffer_size / PAGE_SIZE, 1); 2792 else 2793 check_poison_obj(cachep, objp); 2794#else 2795 check_poison_obj(cachep, objp); 2796#endif 2797 poison_obj(cachep, objp, POISON_INUSE); 2798 } 2799 if (cachep->flags & SLAB_STORE_USER) 2800 *dbg_userword(cachep, objp) = caller; 2801 2802 if (cachep->flags & SLAB_RED_ZONE) { 2803 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 2804 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 2805 slab_error(cachep, "double free, or memory outside" 2806 " object was overwritten"); 2807 printk(KERN_ERR 2808 "%p: redzone 1:0x%lx, redzone 2:0x%lx\n", 2809 objp, *dbg_redzone1(cachep, objp), 2810 *dbg_redzone2(cachep, objp)); 2811 } 2812 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 2813 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2814 } 2815#ifdef CONFIG_DEBUG_SLAB_LEAK 2816 { 2817 struct slab *slabp; 2818 unsigned objnr; 2819 2820 slabp = page_get_slab(virt_to_page(objp)); 2821 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 2822 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 2823 } 2824#endif 2825 objp += obj_offset(cachep); 2826 if (cachep->ctor && cachep->flags & SLAB_POISON) { 2827 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2828 2829 if (!(flags & __GFP_WAIT)) 2830 ctor_flags |= SLAB_CTOR_ATOMIC; 2831 2832 cachep->ctor(objp, cachep, ctor_flags); 2833 } 2834 return objp; 2835} 2836#else 2837#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2838#endif 2839 2840static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 2841{ 2842 void *objp; 2843 struct array_cache *ac; 2844 2845#ifdef CONFIG_NUMA 2846 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { 2847 objp = alternate_node_alloc(cachep, flags); 2848 if (objp != NULL) 2849 return objp; 2850 } 2851#endif 2852 2853 check_irq_off(); 2854 ac = cpu_cache_get(cachep); 2855 if (likely(ac->avail)) { 2856 STATS_INC_ALLOCHIT(cachep); 2857 ac->touched = 1; 2858 objp = ac->entry[--ac->avail]; 2859 } else { 2860 STATS_INC_ALLOCMISS(cachep); 2861 objp = cache_alloc_refill(cachep, flags); 2862 } 2863 return objp; 2864} 2865 2866static __always_inline void *__cache_alloc(struct kmem_cache *cachep, 2867 gfp_t flags, void *caller) 2868{ 2869 unsigned long save_flags; 2870 void *objp; 2871 2872 cache_alloc_debugcheck_before(cachep, flags); 2873 2874 local_irq_save(save_flags); 2875 objp = ____cache_alloc(cachep, flags); 2876 local_irq_restore(save_flags); 2877 objp = cache_alloc_debugcheck_after(cachep, flags, objp, 2878 caller); 2879 prefetchw(objp); 2880 return objp; 2881} 2882 2883#ifdef CONFIG_NUMA 2884/* 2885 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 2886 * 2887 * If we are in_interrupt, then process context, including cpusets and 2888 * mempolicy, may not apply and should not be used for allocation policy. 
2889 */ 2890static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 2891{ 2892 int nid_alloc, nid_here; 2893 2894 if (in_interrupt()) 2895 return NULL; 2896 nid_alloc = nid_here = numa_node_id(); 2897 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 2898 nid_alloc = cpuset_mem_spread_node(); 2899 else if (current->mempolicy) 2900 nid_alloc = slab_node(current->mempolicy); 2901 if (nid_alloc != nid_here) 2902 return __cache_alloc_node(cachep, flags, nid_alloc); 2903 return NULL; 2904} 2905 2906/* 2907 * A interface to enable slab creation on nodeid 2908 */ 2909static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 2910 int nodeid) 2911{ 2912 struct list_head *entry; 2913 struct slab *slabp; 2914 struct kmem_list3 *l3; 2915 void *obj; 2916 int x; 2917 2918 l3 = cachep->nodelists[nodeid]; 2919 BUG_ON(!l3); 2920 2921retry: 2922 check_irq_off(); 2923 spin_lock(&l3->list_lock); 2924 entry = l3->slabs_partial.next; 2925 if (entry == &l3->slabs_partial) { 2926 l3->free_touched = 1; 2927 entry = l3->slabs_free.next; 2928 if (entry == &l3->slabs_free) 2929 goto must_grow; 2930 } 2931 2932 slabp = list_entry(entry, struct slab, list); 2933 check_spinlock_acquired_node(cachep, nodeid); 2934 check_slabp(cachep, slabp); 2935 2936 STATS_INC_NODEALLOCS(cachep); 2937 STATS_INC_ACTIVE(cachep); 2938 STATS_SET_HIGH(cachep); 2939 2940 BUG_ON(slabp->inuse == cachep->num); 2941 2942 obj = slab_get_obj(cachep, slabp, nodeid); 2943 check_slabp(cachep, slabp); 2944 l3->free_objects--; 2945 /* move slabp to correct slabp list: */ 2946 list_del(&slabp->list); 2947 2948 if (slabp->free == BUFCTL_END) 2949 list_add(&slabp->list, &l3->slabs_full); 2950 else 2951 list_add(&slabp->list, &l3->slabs_partial); 2952 2953 spin_unlock(&l3->list_lock); 2954 goto done; 2955 2956must_grow: 2957 spin_unlock(&l3->list_lock); 2958 x = cache_grow(cachep, flags, nodeid); 2959 2960 if (!x) 2961 return NULL; 2962 2963 goto retry; 2964done: 2965 return obj; 2966} 2967#endif 2968 2969/* 2970 * Caller needs to acquire correct kmem_list's list_lock 2971 */ 2972static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 2973 int node) 2974{ 2975 int i; 2976 struct kmem_list3 *l3; 2977 2978 for (i = 0; i < nr_objects; i++) { 2979 void *objp = objpp[i]; 2980 struct slab *slabp; 2981 2982 slabp = virt_to_slab(objp); 2983 l3 = cachep->nodelists[node]; 2984 list_del(&slabp->list); 2985 check_spinlock_acquired_node(cachep, node); 2986 check_slabp(cachep, slabp); 2987 slab_put_obj(cachep, slabp, objp, node); 2988 STATS_DEC_ACTIVE(cachep); 2989 l3->free_objects++; 2990 check_slabp(cachep, slabp); 2991 2992 /* fixup slab chains */ 2993 if (slabp->inuse == 0) { 2994 if (l3->free_objects > l3->free_limit) { 2995 l3->free_objects -= cachep->num; 2996 slab_destroy(cachep, slabp); 2997 } else { 2998 list_add(&slabp->list, &l3->slabs_free); 2999 } 3000 } else { 3001 /* Unconditionally move a slab to the end of the 3002 * partial list on free - maximum time for the 3003 * other objects to be freed, too. 
3004 */ 3005 list_add_tail(&slabp->list, &l3->slabs_partial); 3006 } 3007 } 3008} 3009 3010static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3011{ 3012 int batchcount; 3013 struct kmem_list3 *l3; 3014 int node = numa_node_id(); 3015 3016 batchcount = ac->batchcount; 3017#if DEBUG 3018 BUG_ON(!batchcount || batchcount > ac->avail); 3019#endif 3020 check_irq_off(); 3021 l3 = cachep->nodelists[node]; 3022 spin_lock(&l3->list_lock); 3023 if (l3->shared) { 3024 struct array_cache *shared_array = l3->shared; 3025 int max = shared_array->limit - shared_array->avail; 3026 if (max) { 3027 if (batchcount > max) 3028 batchcount = max; 3029 memcpy(&(shared_array->entry[shared_array->avail]), 3030 ac->entry, sizeof(void *) * batchcount); 3031 shared_array->avail += batchcount; 3032 goto free_done; 3033 } 3034 } 3035 3036 free_block(cachep, ac->entry, batchcount, node); 3037free_done: 3038#if STATS 3039 { 3040 int i = 0; 3041 struct list_head *p; 3042 3043 p = l3->slabs_free.next; 3044 while (p != &(l3->slabs_free)) { 3045 struct slab *slabp; 3046 3047 slabp = list_entry(p, struct slab, list); 3048 BUG_ON(slabp->inuse); 3049 3050 i++; 3051 p = p->next; 3052 } 3053 STATS_SET_FREEABLE(cachep, i); 3054 } 3055#endif 3056 spin_unlock(&l3->list_lock); 3057 ac->avail -= batchcount; 3058 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3059} 3060 3061/* 3062 * Release an obj back to its cache. If the obj has a constructed state, it must 3063 * be in this state _before_ it is released. Called with disabled ints. 3064 */ 3065static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3066{ 3067 struct array_cache *ac = cpu_cache_get(cachep); 3068 3069 check_irq_off(); 3070 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3071 3072 /* Make sure we are not freeing a object from another 3073 * node to the array cache on this cpu. 3074 */ 3075#ifdef CONFIG_NUMA 3076 { 3077 struct slab *slabp; 3078 slabp = virt_to_slab(objp); 3079 if (unlikely(slabp->nodeid != numa_node_id())) { 3080 struct array_cache *alien = NULL; 3081 int nodeid = slabp->nodeid; 3082 struct kmem_list3 *l3; 3083 3084 l3 = cachep->nodelists[numa_node_id()]; 3085 STATS_INC_NODEFREES(cachep); 3086 if (l3->alien && l3->alien[nodeid]) { 3087 alien = l3->alien[nodeid]; 3088 spin_lock(&alien->lock); 3089 if (unlikely(alien->avail == alien->limit)) 3090 __drain_alien_cache(cachep, 3091 alien, nodeid); 3092 alien->entry[alien->avail++] = objp; 3093 spin_unlock(&alien->lock); 3094 } else { 3095 spin_lock(&(cachep->nodelists[nodeid])-> 3096 list_lock); 3097 free_block(cachep, &objp, 1, nodeid); 3098 spin_unlock(&(cachep->nodelists[nodeid])-> 3099 list_lock); 3100 } 3101 return; 3102 } 3103 } 3104#endif 3105 if (likely(ac->avail < ac->limit)) { 3106 STATS_INC_FREEHIT(cachep); 3107 ac->entry[ac->avail++] = objp; 3108 return; 3109 } else { 3110 STATS_INC_FREEMISS(cachep); 3111 cache_flusharray(cachep, ac); 3112 ac->entry[ac->avail++] = objp; 3113 } 3114} 3115 3116/** 3117 * kmem_cache_alloc - Allocate an object 3118 * @cachep: The cache to allocate from. 3119 * @flags: See kmalloc(). 3120 * 3121 * Allocate an object from this cache. The flags are only relevant 3122 * if the cache has no available objects. 3123 */ 3124void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3125{ 3126 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3127} 3128EXPORT_SYMBOL(kmem_cache_alloc); 3129 3130/** 3131 * kmem_cache_alloc - Allocate an object. 
The memory is set to zero. 3132 * @cache: The cache to allocate from. 3133 * @flags: See kmalloc(). 3134 * 3135 * Allocate an object from this cache and set the allocated memory to zero. 3136 * The flags are only relevant if the cache has no available objects. 3137 */ 3138void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags) 3139{ 3140 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0)); 3141 if (ret) 3142 memset(ret, 0, obj_size(cache)); 3143 return ret; 3144} 3145EXPORT_SYMBOL(kmem_cache_zalloc); 3146 3147/** 3148 * kmem_ptr_validate - check if an untrusted pointer might 3149 * be a slab entry. 3150 * @cachep: the cache we're checking against 3151 * @ptr: pointer to validate 3152 * 3153 * This verifies that the untrusted pointer looks sane: 3154 * it is _not_ a guarantee that the pointer is actually 3155 * part of the slab cache in question, but it at least 3156 * validates that the pointer can be dereferenced and 3157 * looks half-way sane. 3158 * 3159 * Currently only used for dentry validation. 3160 */ 3161int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) 3162{ 3163 unsigned long addr = (unsigned long)ptr; 3164 unsigned long min_addr = PAGE_OFFSET; 3165 unsigned long align_mask = BYTES_PER_WORD - 1; 3166 unsigned long size = cachep->buffer_size; 3167 struct page *page; 3168 3169 if (unlikely(addr < min_addr)) 3170 goto out; 3171 if (unlikely(addr > (unsigned long)high_memory - size)) 3172 goto out; 3173 if (unlikely(addr & align_mask)) 3174 goto out; 3175 if (unlikely(!kern_addr_valid(addr))) 3176 goto out; 3177 if (unlikely(!kern_addr_valid(addr + size - 1))) 3178 goto out; 3179 page = virt_to_page(ptr); 3180 if (unlikely(!PageSlab(page))) 3181 goto out; 3182 if (unlikely(page_get_cache(page) != cachep)) 3183 goto out; 3184 return 1; 3185out: 3186 return 0; 3187} 3188 3189#ifdef CONFIG_NUMA 3190/** 3191 * kmem_cache_alloc_node - Allocate an object on the specified node 3192 * @cachep: The cache to allocate from. 3193 * @flags: See kmalloc(). 3194 * @nodeid: node number of the target node. 3195 * 3196 * Identical to kmem_cache_alloc, except that this function is slow 3197 * and can sleep. And it will allocate memory on the given node, which 3198 * can improve the performance for cpu bound structures. 3199 * New and improved: it will now make sure that the object gets 3200 * put on the correct node list so that there is no false sharing. 3201 */ 3202void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3203{ 3204 unsigned long save_flags; 3205 void *ptr; 3206 3207 cache_alloc_debugcheck_before(cachep, flags); 3208 local_irq_save(save_flags); 3209 3210 if (nodeid == -1 || nodeid == numa_node_id() || 3211 !cachep->nodelists[nodeid]) 3212 ptr = ____cache_alloc(cachep, flags); 3213 else 3214 ptr = __cache_alloc_node(cachep, flags, nodeid); 3215 local_irq_restore(save_flags); 3216 3217 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, 3218 __builtin_return_address(0)); 3219 3220 return ptr; 3221} 3222EXPORT_SYMBOL(kmem_cache_alloc_node); 3223 3224void *kmalloc_node(size_t size, gfp_t flags, int node) 3225{ 3226 struct kmem_cache *cachep; 3227 3228 cachep = kmem_find_general_cachep(size, flags); 3229 if (unlikely(cachep == NULL)) 3230 return NULL; 3231 return kmem_cache_alloc_node(cachep, flags, node); 3232} 3233EXPORT_SYMBOL(kmalloc_node); 3234#endif 3235 3236/** 3237 * kmalloc - allocate memory 3238 * @size: how many bytes of memory are required. 3239 * @flags: the type of memory to allocate. 
3240 * @caller: function caller for debug tracking of the caller 3241 * 3242 * kmalloc is the normal method of allocating memory 3243 * in the kernel. 3244 * 3245 * The @flags argument may be one of: 3246 * 3247 * %GFP_USER - Allocate memory on behalf of user. May sleep. 3248 * 3249 * %GFP_KERNEL - Allocate normal kernel ram. May sleep. 3250 * 3251 * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers. 3252 * 3253 * Additionally, the %GFP_DMA flag may be set to indicate the memory 3254 * must be suitable for DMA. This can mean different things on different 3255 * platforms. For example, on i386, it means that the memory must come 3256 * from the first 16MB. 3257 */ 3258static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3259 void *caller) 3260{ 3261 struct kmem_cache *cachep; 3262 3263 /* If you want to save a few bytes .text space: replace 3264 * __ with kmem_. 3265 * Then kmalloc uses the uninlined functions instead of the inline 3266 * functions. 3267 */ 3268 cachep = __find_general_cachep(size, flags); 3269 if (unlikely(cachep == NULL)) 3270 return NULL; 3271 return __cache_alloc(cachep, flags, caller); 3272} 3273 3274 3275void *__kmalloc(size_t size, gfp_t flags) 3276{ 3277#ifndef CONFIG_DEBUG_SLAB 3278 return __do_kmalloc(size, flags, NULL); 3279#else 3280 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3281#endif 3282} 3283EXPORT_SYMBOL(__kmalloc); 3284 3285#ifdef CONFIG_DEBUG_SLAB 3286void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3287{ 3288 return __do_kmalloc(size, flags, caller); 3289} 3290EXPORT_SYMBOL(__kmalloc_track_caller); 3291#endif 3292 3293#ifdef CONFIG_SMP 3294/** 3295 * __alloc_percpu - allocate one copy of the object for every present 3296 * cpu in the system, zeroing them. 3297 * Objects should be dereferenced using the per_cpu_ptr macro only. 3298 * 3299 * @size: how many bytes of memory are required. 3300 */ 3301void *__alloc_percpu(size_t size) 3302{ 3303 int i; 3304 struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); 3305 3306 if (!pdata) 3307 return NULL; 3308 3309 /* 3310 * Cannot use for_each_online_cpu since a cpu may come online 3311 * and we have no way of figuring out how to fix the array 3312 * that we have allocated then.... 3313 */ 3314 for_each_cpu(i) { 3315 int node = cpu_to_node(i); 3316 3317 if (node_online(node)) 3318 pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node); 3319 else 3320 pdata->ptrs[i] = kmalloc(size, GFP_KERNEL); 3321 3322 if (!pdata->ptrs[i]) 3323 goto unwind_oom; 3324 memset(pdata->ptrs[i], 0, size); 3325 } 3326 3327 /* Catch derefs w/o wrappers */ 3328 return (void *)(~(unsigned long)pdata); 3329 3330unwind_oom: 3331 while (--i >= 0) { 3332 if (!cpu_possible(i)) 3333 continue; 3334 kfree(pdata->ptrs[i]); 3335 } 3336 kfree(pdata); 3337 return NULL; 3338} 3339EXPORT_SYMBOL(__alloc_percpu); 3340#endif 3341 3342/** 3343 * kmem_cache_free - Deallocate an object 3344 * @cachep: The cache the allocation was from. 3345 * @objp: The previously allocated object. 3346 * 3347 * Free an object which was previously allocated from this 3348 * cache. 3349 */ 3350void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3351{ 3352 unsigned long flags; 3353 3354 local_irq_save(flags); 3355 __cache_free(cachep, objp); 3356 local_irq_restore(flags); 3357} 3358EXPORT_SYMBOL(kmem_cache_free); 3359 3360/** 3361 * kfree - free previously allocated memory 3362 * @objp: pointer returned by kmalloc. 3363 * 3364 * If @objp is NULL, no operation is performed. 
3365 * 3366 * Don't free memory not originally allocated by kmalloc() 3367 * or you will run into trouble. 3368 */ 3369void kfree(const void *objp) 3370{ 3371 struct kmem_cache *c; 3372 unsigned long flags; 3373 3374 if (unlikely(!objp)) 3375 return; 3376 local_irq_save(flags); 3377 kfree_debugcheck(objp); 3378 c = virt_to_cache(objp); 3379 mutex_debug_check_no_locks_freed(objp, obj_size(c)); 3380 __cache_free(c, (void *)objp); 3381 local_irq_restore(flags); 3382} 3383EXPORT_SYMBOL(kfree); 3384 3385#ifdef CONFIG_SMP 3386/** 3387 * free_percpu - free previously allocated percpu memory 3388 * @objp: pointer returned by alloc_percpu. 3389 * 3390 * Don't free memory not originally allocated by alloc_percpu() 3391 * The complemented objp is to check for that. 3392 */ 3393void free_percpu(const void *objp) 3394{ 3395 int i; 3396 struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp); 3397 3398 /* 3399 * We allocate for all cpus so we cannot use for online cpu here. 3400 */ 3401 for_each_cpu(i) 3402 kfree(p->ptrs[i]); 3403 kfree(p); 3404} 3405EXPORT_SYMBOL(free_percpu); 3406#endif 3407 3408unsigned int kmem_cache_size(struct kmem_cache *cachep) 3409{ 3410 return obj_size(cachep); 3411} 3412EXPORT_SYMBOL(kmem_cache_size); 3413 3414const char *kmem_cache_name(struct kmem_cache *cachep) 3415{ 3416 return cachep->name; 3417} 3418EXPORT_SYMBOL_GPL(kmem_cache_name); 3419 3420/* 3421 * This initializes kmem_list3 or resizes varioius caches for all nodes. 3422 */ 3423static int alloc_kmemlist(struct kmem_cache *cachep) 3424{ 3425 int node; 3426 struct kmem_list3 *l3; 3427 struct array_cache *new_shared; 3428 struct array_cache **new_alien; 3429 3430 for_each_online_node(node) { 3431 3432 new_alien = alloc_alien_cache(node, cachep->limit); 3433 if (!new_alien) 3434 goto fail; 3435 3436 new_shared = alloc_arraycache(node, 3437 cachep->shared*cachep->batchcount, 3438 0xbaadf00d); 3439 if (!new_shared) { 3440 free_alien_cache(new_alien); 3441 goto fail; 3442 } 3443 3444 l3 = cachep->nodelists[node]; 3445 if (l3) { 3446 struct array_cache *shared = l3->shared; 3447 3448 spin_lock_irq(&l3->list_lock); 3449 3450 if (shared) 3451 free_block(cachep, shared->entry, 3452 shared->avail, node); 3453 3454 l3->shared = new_shared; 3455 if (!l3->alien) { 3456 l3->alien = new_alien; 3457 new_alien = NULL; 3458 } 3459 l3->free_limit = (1 + nr_cpus_node(node)) * 3460 cachep->batchcount + cachep->num; 3461 spin_unlock_irq(&l3->list_lock); 3462 kfree(shared); 3463 free_alien_cache(new_alien); 3464 continue; 3465 } 3466 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3467 if (!l3) { 3468 free_alien_cache(new_alien); 3469 kfree(new_shared); 3470 goto fail; 3471 } 3472 3473 kmem_list3_init(l3); 3474 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3475 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3476 l3->shared = new_shared; 3477 l3->alien = new_alien; 3478 l3->free_limit = (1 + nr_cpus_node(node)) * 3479 cachep->batchcount + cachep->num; 3480 cachep->nodelists[node] = l3; 3481 } 3482 return 0; 3483 3484fail: 3485 if (!cachep->next.next) { 3486 /* Cache is not active yet. 
Roll back what we did */ 3487 node--; 3488 while (node >= 0) { 3489 if (cachep->nodelists[node]) { 3490 l3 = cachep->nodelists[node]; 3491 3492 kfree(l3->shared); 3493 free_alien_cache(l3->alien); 3494 kfree(l3); 3495 cachep->nodelists[node] = NULL; 3496 } 3497 node--; 3498 } 3499 } 3500 return -ENOMEM; 3501} 3502 3503struct ccupdate_struct { 3504 struct kmem_cache *cachep; 3505 struct array_cache *new[NR_CPUS]; 3506}; 3507 3508static void do_ccupdate_local(void *info) 3509{ 3510 struct ccupdate_struct *new = info; 3511 struct array_cache *old; 3512 3513 check_irq_off(); 3514 old = cpu_cache_get(new->cachep); 3515 3516 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3517 new->new[smp_processor_id()] = old; 3518} 3519 3520/* Always called with the cache_chain_mutex held */ 3521static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3522 int batchcount, int shared) 3523{ 3524 struct ccupdate_struct new; 3525 int i, err; 3526 3527 memset(&new.new, 0, sizeof(new.new)); 3528 for_each_online_cpu(i) { 3529 new.new[i] = alloc_arraycache(cpu_to_node(i), limit, 3530 batchcount); 3531 if (!new.new[i]) { 3532 for (i--; i >= 0; i--) 3533 kfree(new.new[i]); 3534 return -ENOMEM; 3535 } 3536 } 3537 new.cachep = cachep; 3538 3539 on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1); 3540 3541 check_irq_on(); 3542 cachep->batchcount = batchcount; 3543 cachep->limit = limit; 3544 cachep->shared = shared; 3545 3546 for_each_online_cpu(i) { 3547 struct array_cache *ccold = new.new[i]; 3548 if (!ccold) 3549 continue; 3550 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3551 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3552 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3553 kfree(ccold); 3554 } 3555 3556 err = alloc_kmemlist(cachep); 3557 if (err) { 3558 printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", 3559 cachep->name, -err); 3560 BUG(); 3561 } 3562 return 0; 3563} 3564 3565/* Called with cache_chain_mutex held always */ 3566static void enable_cpucache(struct kmem_cache *cachep) 3567{ 3568 int err; 3569 int limit, shared; 3570 3571 /* 3572 * The head array serves three purposes: 3573 * - create a LIFO ordering, i.e. return objects that are cache-warm 3574 * - reduce the number of spinlock operations. 3575 * - reduce the number of linked list operations on the slab and 3576 * bufctl chains: array operations are cheaper. 3577 * The numbers are guessed, we should auto-tune as described by 3578 * Bonwick. 3579 */ 3580 if (cachep->buffer_size > 131072) 3581 limit = 1; 3582 else if (cachep->buffer_size > PAGE_SIZE) 3583 limit = 8; 3584 else if (cachep->buffer_size > 1024) 3585 limit = 24; 3586 else if (cachep->buffer_size > 256) 3587 limit = 54; 3588 else 3589 limit = 120; 3590 3591 /* 3592 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3593 * allocation behaviour: Most allocs on one cpu, most free operations 3594 * on another cpu. For these cases, an efficient object passing between 3595 * cpus is necessary. This is provided by a shared array. The array 3596 * replaces Bonwick's magazine layer. 3597 * On uniprocessor, it's functionally equivalent (but less efficient) 3598 * to a larger limit. Thus disabled by default. 3599 */ 3600 shared = 0; 3601#ifdef CONFIG_SMP 3602 if (cachep->buffer_size <= PAGE_SIZE) 3603 shared = 8; 3604#endif 3605 3606#if DEBUG 3607 /* 3608 * With debugging enabled, large batchcount lead to excessively long 3609 * periods with disabled local interrupts. 
Limit the batchcount 3610 */ 3611 if (limit > 32) 3612 limit = 32; 3613#endif 3614 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); 3615 if (err) 3616 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3617 cachep->name, -err); 3618} 3619 3620/* 3621 * Drain an array if it contains any elements taking the l3 lock only if 3622 * necessary. Note that the l3 listlock also protects the array_cache 3623 * if drain_array() is used on the shared array. 3624 */ 3625void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 3626 struct array_cache *ac, int force, int node) 3627{ 3628 int tofree; 3629 3630 if (!ac || !ac->avail) 3631 return; 3632 if (ac->touched && !force) { 3633 ac->touched = 0; 3634 } else { 3635 spin_lock_irq(&l3->list_lock); 3636 if (ac->avail) { 3637 tofree = force ? ac->avail : (ac->limit + 4) / 5; 3638 if (tofree > ac->avail) 3639 tofree = (ac->avail + 1) / 2; 3640 free_block(cachep, ac->entry, tofree, node); 3641 ac->avail -= tofree; 3642 memmove(ac->entry, &(ac->entry[tofree]), 3643 sizeof(void *) * ac->avail); 3644 } 3645 spin_unlock_irq(&l3->list_lock); 3646 } 3647} 3648 3649/** 3650 * cache_reap - Reclaim memory from caches. 3651 * @unused: unused parameter 3652 * 3653 * Called from workqueue/eventd every few seconds. 3654 * Purpose: 3655 * - clear the per-cpu caches for this CPU. 3656 * - return freeable pages to the main free memory pool. 3657 * 3658 * If we cannot acquire the cache chain mutex then just give up - we'll try 3659 * again on the next iteration. 3660 */ 3661static void cache_reap(void *unused) 3662{ 3663 struct list_head *walk; 3664 struct kmem_list3 *l3; 3665 int node = numa_node_id(); 3666 3667 if (!mutex_trylock(&cache_chain_mutex)) { 3668 /* Give up. Setup the next iteration. */ 3669 schedule_delayed_work(&__get_cpu_var(reap_work), 3670 REAPTIMEOUT_CPUC); 3671 return; 3672 } 3673 3674 list_for_each(walk, &cache_chain) { 3675 struct kmem_cache *searchp; 3676 struct list_head *p; 3677 int tofree; 3678 struct slab *slabp; 3679 3680 searchp = list_entry(walk, struct kmem_cache, next); 3681 check_irq_on(); 3682 3683 /* 3684 * We only take the l3 lock if absolutely necessary and we 3685 * have established with reasonable certainty that 3686 * we can do some work if the lock was obtained. 3687 */ 3688 l3 = searchp->nodelists[node]; 3689 3690 reap_alien(searchp, l3); 3691 3692 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); 3693 3694 /* 3695 * These are racy checks but it does not matter 3696 * if we skip one check or scan twice. 3697 */ 3698 if (time_after(l3->next_reap, jiffies)) 3699 goto next; 3700 3701 l3->next_reap = jiffies + REAPTIMEOUT_LIST3; 3702 3703 drain_array(searchp, l3, l3->shared, 0, node); 3704 3705 if (l3->free_touched) { 3706 l3->free_touched = 0; 3707 goto next; 3708 } 3709 3710 tofree = (l3->free_limit + 5 * searchp->num - 1) / 3711 (5 * searchp->num); 3712 do { 3713 /* 3714 * Do not lock if there are no free blocks. 3715 */ 3716 if (list_empty(&l3->slabs_free)) 3717 break; 3718 3719 spin_lock_irq(&l3->list_lock); 3720 p = l3->slabs_free.next; 3721 if (p == &(l3->slabs_free)) { 3722 spin_unlock_irq(&l3->list_lock); 3723 break; 3724 } 3725 3726 slabp = list_entry(p, struct slab, list); 3727 BUG_ON(slabp->inuse); 3728 list_del(&slabp->list); 3729 STATS_INC_REAPED(searchp); 3730 3731 /* 3732 * Safe to drop the lock. The slab is no longer linked 3733 * to the cache. 
searchp cannot disappear, we hold 3734 * cache_chain_lock 3735 */ 3736 l3->free_objects -= searchp->num; 3737 spin_unlock_irq(&l3->list_lock); 3738 slab_destroy(searchp, slabp); 3739 } while (--tofree > 0); 3740next: 3741 cond_resched(); 3742 } 3743 check_irq_on(); 3744 mutex_unlock(&cache_chain_mutex); 3745 next_reap_node(); 3746 /* Set up the next iteration */ 3747 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); 3748} 3749 3750#ifdef CONFIG_PROC_FS 3751 3752static void print_slabinfo_header(struct seq_file *m) 3753{ 3754 /* 3755 * Output format version, so at least we can change it 3756 * without _too_ many complaints. 3757 */ 3758#if STATS 3759 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); 3760#else 3761 seq_puts(m, "slabinfo - version: 2.1\n"); 3762#endif 3763 seq_puts(m, "# name <active_objs> <num_objs> <objsize> " 3764 "<objperslab> <pagesperslab>"); 3765 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 3766 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 3767#if STATS 3768 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " 3769 "<error> <maxfreeable> <nodeallocs> <remotefrees>"); 3770 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); 3771#endif 3772 seq_putc(m, '\n'); 3773} 3774 3775static void *s_start(struct seq_file *m, loff_t *pos) 3776{ 3777 loff_t n = *pos; 3778 struct list_head *p; 3779 3780 mutex_lock(&cache_chain_mutex); 3781 if (!n) 3782 print_slabinfo_header(m); 3783 p = cache_chain.next; 3784 while (n--) { 3785 p = p->next; 3786 if (p == &cache_chain) 3787 return NULL; 3788 } 3789 return list_entry(p, struct kmem_cache, next); 3790} 3791 3792static void *s_next(struct seq_file *m, void *p, loff_t *pos) 3793{ 3794 struct kmem_cache *cachep = p; 3795 ++*pos; 3796 return cachep->next.next == &cache_chain ? 
3797 NULL : list_entry(cachep->next.next, struct kmem_cache, next); 3798} 3799 3800static void s_stop(struct seq_file *m, void *p) 3801{ 3802 mutex_unlock(&cache_chain_mutex); 3803} 3804 3805static int s_show(struct seq_file *m, void *p) 3806{ 3807 struct kmem_cache *cachep = p; 3808 struct list_head *q; 3809 struct slab *slabp; 3810 unsigned long active_objs; 3811 unsigned long num_objs; 3812 unsigned long active_slabs = 0; 3813 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 3814 const char *name; 3815 char *error = NULL; 3816 int node; 3817 struct kmem_list3 *l3; 3818 3819 active_objs = 0; 3820 num_slabs = 0; 3821 for_each_online_node(node) { 3822 l3 = cachep->nodelists[node]; 3823 if (!l3) 3824 continue; 3825 3826 check_irq_on(); 3827 spin_lock_irq(&l3->list_lock); 3828 3829 list_for_each(q, &l3->slabs_full) { 3830 slabp = list_entry(q, struct slab, list); 3831 if (slabp->inuse != cachep->num && !error) 3832 error = "slabs_full accounting error"; 3833 active_objs += cachep->num; 3834 active_slabs++; 3835 } 3836 list_for_each(q, &l3->slabs_partial) { 3837 slabp = list_entry(q, struct slab, list); 3838 if (slabp->inuse == cachep->num && !error) 3839 error = "slabs_partial inuse accounting error"; 3840 if (!slabp->inuse && !error) 3841 error = "slabs_partial/inuse accounting error"; 3842 active_objs += slabp->inuse; 3843 active_slabs++; 3844 } 3845 list_for_each(q, &l3->slabs_free) { 3846 slabp = list_entry(q, struct slab, list); 3847 if (slabp->inuse && !error) 3848 error = "slabs_free/inuse accounting error"; 3849 num_slabs++; 3850 } 3851 free_objects += l3->free_objects; 3852 if (l3->shared) 3853 shared_avail += l3->shared->avail; 3854 3855 spin_unlock_irq(&l3->list_lock); 3856 } 3857 num_slabs += active_slabs; 3858 num_objs = num_slabs * cachep->num; 3859 if (num_objs - active_objs != free_objects && !error) 3860 error = "free_objects accounting error"; 3861 3862 name = cachep->name; 3863 if (error) 3864 printk(KERN_ERR "slab: cache %s error: %s\n", name, error); 3865 3866 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", 3867 name, active_objs, num_objs, cachep->buffer_size, 3868 cachep->num, (1 << cachep->gfporder)); 3869 seq_printf(m, " : tunables %4u %4u %4u", 3870 cachep->limit, cachep->batchcount, cachep->shared); 3871 seq_printf(m, " : slabdata %6lu %6lu %6lu", 3872 active_slabs, num_slabs, shared_avail); 3873#if STATS 3874 { /* list3 stats */ 3875 unsigned long high = cachep->high_mark; 3876 unsigned long allocs = cachep->num_allocations; 3877 unsigned long grown = cachep->grown; 3878 unsigned long reaped = cachep->reaped; 3879 unsigned long errors = cachep->errors; 3880 unsigned long max_freeable = cachep->max_freeable; 3881 unsigned long node_allocs = cachep->node_allocs; 3882 unsigned long node_frees = cachep->node_frees; 3883 3884 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ 3885 %4lu %4lu %4lu %4lu", allocs, high, grown, 3886 reaped, errors, max_freeable, node_allocs, 3887 node_frees); 3888 } 3889 /* cpu stats */ 3890 { 3891 unsigned long allochit = atomic_read(&cachep->allochit); 3892 unsigned long allocmiss = atomic_read(&cachep->allocmiss); 3893 unsigned long freehit = atomic_read(&cachep->freehit); 3894 unsigned long freemiss = atomic_read(&cachep->freemiss); 3895 3896 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", 3897 allochit, allocmiss, freehit, freemiss); 3898 } 3899#endif 3900 seq_putc(m, '\n'); 3901 return 0; 3902} 3903 3904/* 3905 * slabinfo_op - iterator that generates /proc/slabinfo 3906 * 3907 * Output layout: 3908 * cache-name 3909 * 
3910 * total-objs
3911 * object size
3912 * num-objs-per-slab
3913 * num-pages-per-slab
3914 * tunables and slabdata columns (see print_slabinfo_header() above)
3915 * + further values with statistics enabled
3916 */
3917
3918struct seq_operations slabinfo_op = {
3919	.start = s_start,
3920	.next = s_next,
3921	.stop = s_stop,
3922	.show = s_show,
3923};
3924
3925#define MAX_SLABINFO_WRITE 128
3926/**
3927 * slabinfo_write - Tuning for the slab allocator
3928 * @file: unused
3929 * @buffer: user buffer
3930 * @count: data length
3931 * @ppos: unused
3932 */
3933ssize_t slabinfo_write(struct file *file, const char __user * buffer,
3934		       size_t count, loff_t *ppos)
3935{
3936	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
3937	int limit, batchcount, shared, res;
3938	struct list_head *p;
3939
3940	if (count > MAX_SLABINFO_WRITE)
3941		return -EINVAL;
3942	if (copy_from_user(&kbuf, buffer, count))
3943		return -EFAULT;
3944	kbuf[MAX_SLABINFO_WRITE] = '\0';
3945
3946	tmp = strchr(kbuf, ' ');
3947	if (!tmp)
3948		return -EINVAL;
3949	*tmp = '\0';
3950	tmp++;
3951	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
3952		return -EINVAL;
3953
3954	/* Find the cache in the chain of caches. */
3955	mutex_lock(&cache_chain_mutex);
3956	res = -EINVAL;
3957	list_for_each(p, &cache_chain) {
3958		struct kmem_cache *cachep;
3959
3960		cachep = list_entry(p, struct kmem_cache, next);
3961		if (!strcmp(cachep->name, kbuf)) {
3962			if (limit < 1 || batchcount < 1 ||
3963			    batchcount > limit || shared < 0) {
3964				res = 0;
3965			} else {
3966				res = do_tune_cpucache(cachep, limit,
3967						       batchcount, shared);
3968			}
3969			break;
3970		}
3971	}
3972	mutex_unlock(&cache_chain_mutex);
3973	if (res >= 0)
3974		res = count;
3975	return res;
3976}
3977
3978#ifdef CONFIG_DEBUG_SLAB_LEAK
3979
3980static void *leaks_start(struct seq_file *m, loff_t *pos)
3981{
3982	loff_t n = *pos;
3983	struct list_head *p;
3984
3985	mutex_lock(&cache_chain_mutex);
3986	p = cache_chain.next;
3987	while (n--) {
3988		p = p->next;
3989		if (p == &cache_chain)
3990			return NULL;
3991	}
3992	return list_entry(p, struct kmem_cache, next);
3993}
3994
3995static inline int add_caller(unsigned long *n, unsigned long v)
3996{
3997	unsigned long *p;
3998	int l;
3999	if (!v)
4000		return 1;
4001	l = n[1];
4002	p = n + 2;
4003	while (l) {
4004		int i = l/2;
4005		unsigned long *q = p + 2 * i;
4006		if (*q == v) {
4007			q[1]++;
4008			return 1;
4009		}
4010		if (*q > v) {
4011			l = i;
4012		} else {
4013			p = q + 2;
4014			l -= i + 1;
4015		}
4016	}
4017	if (++n[1] == n[0])
4018		return 0;
4019	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4020	p[0] = v;
4021	p[1] = 1;
4022	return 1;
4023}
4024
4025static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4026{
4027	void *p;
4028	int i;
4029	if (n[0] == n[1])
4030		return;
4031	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4032		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4033			continue;
4034		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4035			return;
4036	}
4037}
4038
4039static void show_symbol(struct seq_file *m, unsigned long address)
4040{
4041#ifdef CONFIG_KALLSYMS
4042	char *modname;
4043	const char *name;
4044	unsigned long offset, size;
4045	char namebuf[KSYM_NAME_LEN+1];
4046
4047	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
4048
4049	if (name) {
4050		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4051		if (modname)
4052			seq_printf(m, " [%s]", modname);
4053		return;
4054	}
4055#endif
4056	seq_printf(m, "%p", (void *)address);
4057}
4058
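/*
 * Editor's note - an illustration added to this copy, not part of the
 * original file.  leaks_show() below and add_caller()/handle_slab() above
 * share a scratch table of unsigned longs passed in via m->private (set up
 * by the seq_file open routine, outside this excerpt).  Its layout is:
 *
 *	n[0]                 size limit of the table, counted in pairs;
 *	                     when n[1] reaches it, leaks_show() doubles
 *	                     the buffer and retries the entry
 *	n[1]                 number of pairs currently stored
 *	n[2*i+2], n[2*i+3]   i-th pair: caller address and the number of
 *	                     live objects recorded for it, kept sorted by
 *	                     address for the binary search in add_caller()
 *
 * For example, after handle_slab() has seen three objects allocated from
 * one call site and one object from another (addresses made up), the
 * initialized prefix of the table would read:
 *
 *	{ 4, 2, 0xc01abc00, 3, 0xc01def00, 1 }
 *
 * and leaks_show() would emit one "<cache-name>: <count> <symbol>" line
 * for each of the two pairs.
 */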
4059static int leaks_show(struct seq_file *m, void *p)
4060{
4061	struct kmem_cache *cachep = p;
4062	struct list_head *q;
4063	struct slab *slabp;
4064	struct kmem_list3 *l3;
4065	const char *name;
4066	unsigned long *n = m->private;
4067	int node;
4068	int i;
4069
4070	if (!(cachep->flags & SLAB_STORE_USER))
4071		return 0;
4072	if (!(cachep->flags & SLAB_RED_ZONE))
4073		return 0;
4074
4075	/* OK, we can do it */
4076
4077	n[1] = 0;
4078
4079	for_each_online_node(node) {
4080		l3 = cachep->nodelists[node];
4081		if (!l3)
4082			continue;
4083
4084		check_irq_on();
4085		spin_lock_irq(&l3->list_lock);
4086
4087		list_for_each(q, &l3->slabs_full) {
4088			slabp = list_entry(q, struct slab, list);
4089			handle_slab(n, cachep, slabp);
4090		}
4091		list_for_each(q, &l3->slabs_partial) {
4092			slabp = list_entry(q, struct slab, list);
4093			handle_slab(n, cachep, slabp);
4094		}
4095		spin_unlock_irq(&l3->list_lock);
4096	}
4097	name = cachep->name;
4098	if (n[0] == n[1]) {
4099		/* Increase the buffer size */
4100		mutex_unlock(&cache_chain_mutex);
4101		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4102		if (!m->private) {
4103			/* Too bad, we are really out */
4104			m->private = n;
4105			mutex_lock(&cache_chain_mutex);
4106			return -ENOMEM;
4107		}
4108		*(unsigned long *)m->private = n[0] * 2;
4109		kfree(n);
4110		mutex_lock(&cache_chain_mutex);
4111		/* Now make sure this entry will be retried */
4112		m->count = m->size;
4113		return 0;
4114	}
4115	for (i = 0; i < n[1]; i++) {
4116		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4117		show_symbol(m, n[2*i+2]);
4118		seq_putc(m, '\n');
4119	}
4120	return 0;
4121}
4122
4123struct seq_operations slabstats_op = {
4124	.start = leaks_start,
4125	.next = s_next,
4126	.stop = s_stop,
4127	.show = leaks_show,
4128};
4129#endif
4130#endif
4131
4132/**
4133 * ksize - get the actual amount of memory allocated for a given object
4134 * @objp: Pointer to the object
4135 *
4136 * kmalloc may internally round up allocations and return more memory
4137 * than requested. ksize() can be used to determine the actual amount of
4138 * memory allocated. The caller may use this additional memory, even though
4139 * a smaller amount of memory was initially specified with the kmalloc call.
4140 * The caller must guarantee that objp points to a valid object previously
4141 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4142 * must not be freed for the duration of the call.
4143 */
4144unsigned int ksize(const void *objp)
4145{
4146	if (unlikely(objp == NULL))
4147		return 0;
4148
4149	return obj_size(virt_to_cache(objp));
4150}
4151
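/*
 * Editor's illustration - added to this copy, not part of the original file.
 * A minimal sketch of how a caller might use ksize() as described above:
 * reuse the slack that kmalloc() already handed out before falling back to a
 * fresh, larger allocation.  The function name and the append policy are
 * hypothetical, and process context (GFP_KERNEL) is assumed; only the
 * ksize()/kmalloc()/kfree()/memcpy() calls are real interfaces.
 */
#if 0	/* example only, never compiled */
static void *example_append(void *buf, size_t used, const void *data, size_t len)
{
	/* kmalloc() may have rounded the original request up ... */
	if (used + len <= ksize(buf)) {
		/* ... so the tail of the existing allocation can be reused */
		memcpy((char *)buf + used, data, len);
		return buf;
	} else {
		/* not enough slack: move to a larger buffer and copy over */
		void *bigger = kmalloc(used + len, GFP_KERNEL);

		if (!bigger)
			return NULL;
		memcpy(bigger, buf, used);
		memcpy((char *)bigger + used, data, len);
		kfree(buf);
		return bigger;
	}
}
#endif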